Commit d4959c5d authored by Julius Rominger's avatar Julius Rominger

Upload Files

parent 1a224ffa
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(2, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        #self.dropout1 = nn.Dropout2d(0.25)
        #self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(576, 512)
        self.fc2 = nn.Linear(512, 100)

    # x represents our data
    def forward(self, x):
        # Pass data through conv1
        x = self.conv1(x)
        # Use the rectified-linear activation function over x
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        # Run max pooling over x
        x = F.max_pool2d(x, 2)
        # Pass data through dropout1
        #x = self.dropout1(x)
        # Flatten x with start_dim=1
        x = x.view(-1, self.num_flat_features(x))
        # Pass data through fc1
        x = self.fc1(x)
        x = F.relu(x)
        #x = self.dropout2(x)
        x = self.fc2(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
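    # e.g. for a (N, 2, 10, 10) input: conv1 -> (N, 32, 8, 8), conv2 -> (N, 64, 6, 6),
    # max_pool2d -> (N, 64, 3, 3), so num_flat_features returns 64 * 3 * 3 = 576,
    # matching nn.Linear(576, 512) above.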
def get_data():
    f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\single_example_data.txt", "r")
    # Using readlines()
    Lines = f.readlines()
    weightmatrix = []
    points_return = []
    target_data_return = []
    count = 0
    # Strips the newline character
    for line in Lines:
        elements = line.split(";")
        #get the weightmatrix
        x = elements[0].split(", ")
        x.remove("")
        for i in range(0, len(x), 1):
            x[i] = int(x[i])
        weightmatrix.append(x)
        #get the start and endpoints
        points = [[0] * 10 for _ in range(10)]  # 10x10 zero matrix
        y = elements[1].split(" ")
        for i in range(0, len(y), 1):
            y[i] = y[i].strip('[]')
            y[i] = y[i].split(",")
            for j in range(0, len(y[i]), 1):
                y[i][j] = int(y[i][j])
            points[y[i][0]][y[i][1]] = 1
        points_return.append(points)
        #points.append(torch.tensor(y))
        #get the path and transform it into a matrix
        z = elements[2].split(", ")
        z.remove("")
        for i in range(0, len(z), 1):
            z[i] = z[i].strip('()')
            z[i] = z[i].split(",")
            for j in range(0, len(z[i]), 1):
                z[i][j] = int(z[i][j])
        target_data = [[0] * 10 for _ in range(10)]  # 10x10 zero matrix
        for i in range(0, len(z), 1):
            target_data[z[i][0]][z[i][1]] = 1
        target_data[y[1][0]][y[1][1]] = 1
        target_data_return.append(target_data)
        count += 1
    f.close()
    return weightmatrix, points_return, target_data_return, count
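# Illustrative line format assumed by get_data above (hypothetical values, not
# taken from the real data file): 100 comma-separated weights, then the
# "[row,col]" start/end points, then the path cells as "(row,col)" pairs:
#   3, 1, 4, ... , 7, ;[0,3] [5,7];(0,3), (1,3), ... , (5,7), 
# Note the trailing ", " in the first and third part, which is why the parser
# calls x.remove("") and z.remove("").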
if __name__ == "__main__":
    # Remember that you must call model.eval() to set dropout and batch normalization layers to evaluation mode before running inference. Failing to do this will yield inconsistent inference results
    model = Net()
    model.load_state_dict(torch.load("D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models\\model_nr_2.pth"))
    model.eval()
    weightmatrix, points, target_data, batch_size = get_data()
    weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
    weightmatrix = weightmatrix.view(batch_size, 1, 10, 10)
    points = torch.tensor(points, dtype=torch.float32)
    points = points.view(batch_size, 1, 10, 10)
    #merge the weightmatrix and the start-/endpoints to a two-channeled input
    weightmatrix = torch.cat((weightmatrix, points), 1)
    target_data = torch.tensor(target_data, dtype=torch.float32)
    target_data = target_data.view(batch_size, 100)
    result = model(weightmatrix)
    criterion = nn.MSELoss()
    print("loss: ", criterion(result, target_data))
    #print ("Result: ", result.view(10,10))
    #result = result * 7
    result = torch.round(result)
    #print(np.reshape(result.detach().numpy(), (10,10)))
    # the reshapes to (10,10) below assume a single example (batch_size == 1)
    result_plot = plt.matshow(np.reshape(result.detach().numpy(), (10, 10)), cmap=plt.cm.gray)
    result = result.type(torch.int8)
    target_data_plot = plt.matshow(np.reshape(target_data.detach().numpy(), (10, 10)), cmap=plt.cm.gray)
    plt.show()
    target_data = target_data.type(torch.int8)
    print("Result: ", result.view(10, 10))
    print("Target Data: ", target_data.view(10, 10))
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # self.conv1 = nn.Conv2d(1, 32, 3)
        # self.conv2 = nn.Conv2d(32, 64, 3)
        #self.dropout1 = nn.Dropout2d(0.25)
        #self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(100, 512)
        self.fc2 = nn.Linear(512, 1028)
        self.fc3 = nn.Linear(1028, 512)
        self.fc4 = nn.Linear(512, 100)

    # x represents our data
    def forward(self, x):
        # Pass data through conv1
        # x = self.conv1(x)
        # # Use the rectified-linear activation function over x
        # x = F.relu(x)
        #
        # x = self.conv2(x)
        # x = F.relu(x)
        #
        # # Run max pooling over x
        # x = F.max_pool2d(x, 2)
        # # Pass data through dropout1
        # #x = self.dropout1(x)
        # # Flatten x with start_dim=1
        # x = x.view(-1, self.num_flat_features(x))
        # Pass data through fc1
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = F.relu(x)
        #x = self.dropout2(x)
        x = self.fc4(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
def get_data():
    f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\training_data.txt", "r")
    # Using readlines()
    Lines = f.readlines()
    weightmatrix = []
    points_return = []
    target_data_return = []
    count = 0
    # Strips the newline character
    for line in Lines:
        elements = line.split(";")
        #get the weightmatrix
        x = elements[0].split(", ")
        x.remove("")
        for i in range(0, len(x), 1):
            x[i] = int(x[i])
        weightmatrix.append(x)
        #get the start and endpoints
        points = [[0] * 10 for _ in range(10)]  # 10x10 zero matrix
        y = elements[1].split(" ")
        for i in range(0, len(y), 1):
            y[i] = y[i].strip('[]')
            y[i] = y[i].split(",")
            for j in range(0, len(y[i]), 1):
                y[i][j] = int(y[i][j])
            points[y[i][0]][y[i][1]] = 1000
        points_return.append(points)
        #points.append(torch.tensor(y))
        #get the path and transform it into a matrix
        z = elements[2].split(", ")
        z.remove("")
        for i in range(0, len(z), 1):
            z[i] = z[i].strip('()')
            z[i] = z[i].split(",")
            for j in range(0, len(z[i]), 1):
                z[i][j] = int(z[i][j])
        target_data = [[0] * 10 for _ in range(10)]  # 10x10 zero matrix
        for i in range(0, len(z), 1):
            target_data[z[i][0]][z[i][1]] = 100
        target_data[y[1][0]][y[1][1]] = 100
        target_data_return.append(target_data)
        count += 1
    f.close()
    return weightmatrix, points_return, target_data_return, count
if __name__ == '__main__':
    weightmatrix, points, target_data, batch_size = get_data()
    weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
    weightmatrix = weightmatrix.view(batch_size, 1, 10, 10)
    #weightmatrix = weightmatrix.type(torch.int8)
    points = torch.tensor(points, dtype=torch.float32)
    points = points.view(batch_size, 1, 10, 10)
    #merge the weightmatrix and the start-/endpoints to a two-channeled input
    #weightmatrix = torch.cat((weightmatrix, points), 1)
    target_data = torch.tensor(target_data, dtype=torch.float32)
    target_data = target_data.view(batch_size, 100)
    target_data = target_data + weightmatrix.view(batch_size, 100)
    target_data = target_data + points.view(batch_size, 100)
    #target_data.type(torch.int16)
    learning_rate = 0.1
    model_path = 'D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models'
    nn_pathfinder = Net()
    opt = optim.SGD(params=nn_pathfinder.parameters(), lr=learning_rate)
    result = nn_pathfinder(weightmatrix.view(batch_size, 100))
    criterion = nn.MSELoss()
    #try SmoothL1 loss as an alternative
    #print ("Result: ", result)
    print("before training")
    # print ("Result: ", result.flatten())
    # print ("Target Data: ", target_data.flatten())
    print("loss: ", criterion(result, target_data))
    for epoch in tqdm(range(7500)):
        result = nn_pathfinder(weightmatrix.view(batch_size, 100))
        #loss = F.nll_loss(result, target_data)
        loss = criterion(result, target_data)
        #if(epoch % 10 == 0):
        print("loss", loss)
        nn_pathfinder.zero_grad()
        loss.backward()
        opt.step()
    print("after")
    # print ("Result: ", result.flatten())
    # #result = result.type(torch.int8)
    # result = torch.round(result)
    # result = result.type(torch.int8)
    # target_data = target_data.type(torch.int8)
    # print ("Result: ", result.view(10,10))
    # print ("Target Data: ", target_data.view(10,10))
    print("loss: ", loss)
    torch.save(nn_pathfinder.state_dict(),
               model_path + "\\ansatz2_model_nr_" + "1" + ".pth")
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(2, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.fc1 = nn.Linear(576, 512)
        self.fc2 = nn.Linear(512, 100)

    # x represents our data
    def forward(self, x):
        # Pass data through conv1
        x = self.conv1(x)
        # Use the rectified-linear activation function over x
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        # Run max pooling over x
        x = F.max_pool2d(x, 2)
        # Flatten x with start_dim=1
        x = x.view(-1, self.num_flat_features(x))
        # Pass data through fc1
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
def get_data():
    f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\training_data.txt", "r")
    # Using readlines()
    Lines = f.readlines()
    weightmatrix = []
    points_return = []
    target_data_return = []
    count = 0
    # Strips the newline character
    for line in Lines:
        elements = line.split(";")
        #get the weightmatrix
        x = elements[0].split(", ")
        x.remove("")
        for i in range(0, len(x), 1):
            x[i] = int(x[i])
        weightmatrix.append(x)
        #get the start and endpoints
        points = [[0] * 10 for _ in range(10)]  # 10x10 zero matrix
        y = elements[1].split(" ")
        for i in range(0, len(y), 1):
            y[i] = y[i].strip('[]')
            y[i] = y[i].split(",")
            for j in range(0, len(y[i]), 1):
                y[i][j] = int(y[i][j])
            points[y[i][0]][y[i][1]] = 1
        points_return.append(points)
        #get the path and transform it into a matrix
        z = elements[2].split(", ")
        z.remove("")
        for i in range(0, len(z), 1):
            z[i] = z[i].strip('()')
            z[i] = z[i].split(",")
            for j in range(0, len(z[i]), 1):
                z[i][j] = int(z[i][j])
        target_data = [[0] * 10 for _ in range(10)]  # 10x10 zero matrix
        for i in range(0, len(z), 1):
            target_data[z[i][0]][z[i][1]] = 1
        target_data[y[1][0]][y[1][1]] = 1
        target_data_return.append(target_data)
        count += 1
    f.close()
    return weightmatrix, points_return, target_data_return, count
if __name__ == '__main__':
    weightmatrix, points, target_data, batch_size = get_data()
    weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
    weightmatrix = weightmatrix.view(batch_size, 1, 10, 10)
    points = torch.tensor(points, dtype=torch.float32)
    points = points.view(batch_size, 1, 10, 10)
    #merge the weightmatrix and the start-/endpoints to a two-channeled input
    weightmatrix = torch.cat((weightmatrix, points), 1)
    target_data = torch.tensor(target_data, dtype=torch.float32)
    target_data = target_data.view(batch_size, 100)
    learning_rate = 0.1
    model_path = 'D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models'
    nn_pathfinder = Net()
    opt = optim.SGD(params=nn_pathfinder.parameters(), lr=learning_rate)
    result = nn_pathfinder(weightmatrix)
    criterion = nn.MSELoss()
    print("before training")
    print("Result: ", result.flatten())
    print("Target Data: ", target_data.flatten())
    print("loss: ", criterion(result, target_data))
    for epoch in tqdm(range(7500)):
        result = nn_pathfinder(weightmatrix)
        loss = criterion(result, target_data)
        nn_pathfinder.zero_grad()
        loss.backward()
        opt.step()
    print("after")
    # print ("Result: ", result.flatten())
    # result = torch.round(result)
    # result = result.type(torch.int8)
    # target_data = target_data.type(torch.int8)
    # print ("Result: ", result.view(10,10))
    # print ("Target Data: ", target_data.view(10,10))
    print("loss: ", loss)
    torch.save(nn_pathfinder.state_dict(),
               model_path + "\\model_nr_" + "2" + ".pth")
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.fc1 = nn.Linear(576, 128)
        self.fc2 = nn.Linear(128, 100)

    # x represents our data
    def forward(self, x):
        # Pass data through conv1
        x = self.conv1(x)
        # Use the rectified-linear activation function over x
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        # Run max pooling over x
        x = F.max_pool2d(x, 2)
        # Flatten x with start_dim=1
        x = x.view(-1, self.num_flat_features(x))
        # Pass data through fc1
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
def get_data():
    f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\data.txt", "r")
    # Using readlines()
    Lines = f.readlines()
    weightmatrix = []
    points = []
    target_data_return = []
    count = 0
    # Strips the newline character
    for line in Lines:
        elements = line.split(";")
        #get the weightmatrix
        x = elements[0].split(", ")
        x.remove("")
        for i in range(0, len(x), 1):
            x[i] = int(x[i])
        weightmatrix.append(x)
        #get the start and endpoints
        y = elements[1].split(" ")
        for i in range(0, len(y), 1):
            y[i] = y[i].strip('[]')
            y[i] = y[i].split(",")
            for j in range(0, len(y[i]), 1):
                y[i][j] = int(y[i][j])
        points.append(torch.tensor(y))
        #get the path and transform it into a matrix
        z = elements[2].split(", ")
        z.remove("")
        for i in range(0, len(z), 1):
            z[i] = z[i].strip('()')
            z[i] = z[i].split(",")
            for j in range(0, len(z[i]), 1):
                z[i][j] = int(z[i][j])
        target_data = [[0] * 10 for _ in range(10)]  # 10x10 zero matrix; torch.zeros([10, 10])
        for i in range(0, len(z), 1):
            target_data[z[i][0]][z[i][1]] = 1
        target_data[y[1][0]][y[1][1]] = 1
        target_data_return.append(target_data)
        count += 1
    f.close()
    return weightmatrix, points, target_data_return, count
weightmatrix, points, target_data, batch_size = get_data()
# float32 is required here: Conv2d and MSELoss do not accept integer tensors
weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
weightmatrix = weightmatrix.view(batch_size, 1, 10, 10)
learning_rate = 0.1
target_data = torch.tensor(target_data, dtype=torch.float32)
# the net outputs (batch_size, 100), so the target must match that shape
target_data = target_data.view(batch_size, 100)
my_nn = Net()
opt = optim.SGD(params=my_nn.parameters(), lr=learning_rate)
result = my_nn(weightmatrix)
criterion = nn.MSELoss()
print("before training")
print("Result: ", result.flatten())
print("Target Data: ", target_data.flatten())
print("loss: ", criterion(result, target_data))
for epoch in tqdm(range(7500)):
    result = my_nn(weightmatrix)
    loss = criterion(result, target_data)
    my_nn.zero_grad()
    loss.backward()
    opt.step()
print("after")
print("Result: ", result.flatten())
print("Target Data: ", target_data.flatten())
print("loss: ", loss)
import torch
from torch import nn
import numpy as np
class Model(nn.Module):
    def __init__(self, input_size, output_size, hidden_dim, n_layers):
        super(Model, self).__init__()
        # Defining some parameters
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        #Defining the layers
        # RNN Layer
        self.rnn = nn.RNN(input_size, hidden_dim, n_layers, batch_first=True)
        # Fully connected layer
        self.fc = nn.Linear(hidden_dim, output_size)

    def forward(self, x):
        batch_size = x.size(0)
        #Initializing hidden state for first input using method defined below
        hidden = self.init_hidden(batch_size)
        # Passing in the input and hidden state into the model and obtaining outputs
        out, hidden = self.rnn(x, hidden)
        # Reshaping the outputs such that it can be fit into the fully connected layer
        out = out.contiguous().view(-1, self.hidden_dim)
        out = self.fc(out)
        return out, hidden

    def init_hidden(self, batch_size):
        # This method generates the first hidden state of zeros which we'll use in the forward pass
        # Note: "device" is assumed to be defined globally before this is called
        hidden = torch.zeros(self.n_layers, batch_size, self.hidden_dim).to(device)
        # We'll send the tensor holding the hidden state to the device we specified earlier as well
        return hidden
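# A minimal smoke test for the model above (assumed toy sizes, not from the
# original code):
#   device = torch.device("cpu")
#   model = Model(input_size=5, output_size=5, hidden_dim=12, n_layers=1)
#   out, hidden = model(torch.zeros(2, 7, 5))  # batch of 2 sequences of length 7
#   print(out.shape)  # torch.Size([14, 5]) -- (batch_size * seq_len, output_size)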
import numpy as np
import time
import os
from matplotlib import pyplot as plt
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
import cv2
#from scipy.optimize import curve_fit
#read the min_distance values out of a statistics file
def read_min_distances(path):
    f = open(path, "r")
    # Using readlines()
    Lines = f.readlines()
    f.close()
    array = []
    for line in Lines:
        if "min_distance" in line:
            #tmp_array = line.split("number_of_pixels: tensor(")
            tmp_array = line.split("min_distance: ")
            tmp_array2 = tmp_array[1].split(",")
            tmp_array2 = tmp_array2[0].split(".")
            if "tensor" in tmp_array2[0]:
                tmp_array2 = tmp_array2[0].split("tensor(")
                tmp_array2 = tmp_array2[1].split(")")
            array.append(float(tmp_array2[0]))
            #print(tmp_array2[0])
    return np.array(array)

#first read of the data
array1 = read_min_distances("D:\\Studium\\Bachelorarbeit\\Machine Learning\\loss_function\\complete_statistics_changing_gapweight_clusterweight0,5.txt")
#second read of the data
array2 = read_min_distances("D:\\Studium\\Bachelorarbeit\\Machine Learning\\loss_function\\complete_statistics_changing_gapweight_clusterweight1,1.txt")
#third read of the data
array3 = read_min_distances("D:\\Studium\\Bachelorarbeit\\Machine Learning\\loss_function\\complete_statistics_changing_gapweight_clusterweight2.txt")
#x = np.arange(0, 10, 0.1)
#arr = np.array([1, 2, 3, 4, 5])
x = np.arange(0, 10, 0.1)
x2 = np.arange(0, 5000, 50)
#changing clustersize weight
# plt.plot(x, array1, label="Minimal cluster distance with Gapweight 1000")
# plt.plot(x, array2, label="Minimal cluster distance with Gapweight 3000")
# plt.plot(x, array3, label="Minimal cluster distance with Gapweight 5000")
#changing gap-weight
#plt.plot(x2, array1, label="Minimal cluster distance with Clusterweight 0.5")
#plt.plot(x2, array2, label="Minimal cluster distance with Clusterweight 1.1")
plt.plot(x2, array3, label="Minimal cluster distance with Clusterweight 2")
plt.xlabel("Weight of the gap-loss")
plt.ylabel("Minimal distance(in Pixels)")
plt.legend(loc="upper left")
plt.grid()
plt.ylim(-1, 10)
plt.axhline(color='black', lw=0.75)
plt.axvline(color='black', lw=0.75)
plt.savefig("min_distance_changing_gapweight_clusterweight2")
plt.show()
import numpy as np
import time
import os
from matplotlib import pyplot as plt
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
import cv2
#from scipy.optimize import curve_fit
class CustomLoss(nn.Module):
    def __init__(self):
        super(CustomLoss, self).__init__()

    def forward(self, result_given, points_given, weightmatrix_given):
        #variables for easier variation of the loss function
        weight = 20000
        gap_weight = 3000 #1200
        cluster_size_weight = 3
        weight_weight = 1.1 #0.5
        result_size = result_given.size()
        loss = torch.tensor([0], dtype=torch.float32, requires_grad=True)
        for i in range(0, result_size[0]):
            result = result_given[i, 0, 0:, 0:]
            weightmatrix = weightmatrix_given[i, 0, 0:, 0:]
            # print("result", result)
            # result_img = result.round().view(10,10)
            # result_imgplot = plt.matshow(result_img.detach().numpy())
            points = points_given[i]
            # if i%100 == 0:
            # print(i)
            #loss = torch.tensor([0], dtype=torch.float32, requires_grad = True)
            manhattan_distance_start_end = self.estimate_manhattan_distance(points[0][0], points[0][1], points[1][0], points[1][1])
            # soa_cells = torch.tensor([0], dtype=torch.float32, requires_grad = True) #sum of all cells
            # soa_cells_inv = torch.tensor([0], dtype=torch.float32, requires_grad = True) #sum of all cells inverted
            # for row in result:
            #     for column in row:
            #         soa_cells += column
            #         soa_cells_inv += (1-column)
            soa_cells = sum(sum(result))
            soa_cells_inv = 100 - sum(sum(result))
            #print(result)
            #print(soa_cells)
            #set the start and endpoint to 1
            loss_start = torch.tensor([0], dtype=torch.float32, requires_grad=True)
            if (result[points[0][0]][points[0][1]].round() == 0 or result[points[1][0]][points[1][1]].round() == 0):
                loss_start = (2 - (result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
                loss_start = loss_start.view(1)
            #first compute the clusters
            img = result.round()
            img = img.detach().numpy()
            img = np.array(img, dtype=np.uint8)
            num_labels, labels_im = cv2.connectedComponents(img)
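            # Illustrative example (not from the original code): for
            # img = [[1, 1, 0],
            #        [0, 0, 0],
            #        [0, 1, 1]]
            # connectedComponents returns num_labels = 3 and
            # labels_im = [[1, 1, 0],
            #              [0, 0, 0],
            #              [0, 2, 2]] -- label 0 is the background.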
            # print(labels_im)
            # imgplot = plt.matshow(labels_im, cmap=plt.cm.gray)
            # imgplot = plt.matshow(labels_im)
            # plt.show()
            start_value = labels_im[points[0][0]][points[0][1]]
            end_value = labels_im[points[1][0]][points[1][1]]
            # print(start_value)
            # print(end_value)
            #remodel the matrix, so that distance_transform can be used:
            #the end cluster becomes 0 and the background becomes nonzero, so
            #the distance transform measures the distance to the end cluster
            start_cluster = []
            gap_loss = torch.tensor([0], dtype=torch.float32)
            if (start_value != 0 and end_value != 0):
                row_nr = len(labels_im)
                col_nr = len(labels_im[0])
                for r in range(0, row_nr):
                    for c in range(0, col_nr):
                        if (labels_im[r][c] == end_value):
                            labels_im[r][c] = 0
                        elif (labels_im[r][c] == 0):
                            labels_im[r][c] = end_value
                        elif (labels_im[r][c] == start_value):
                            start_cluster.append([r, c])
                        if (end_value == start_value):
                            if (labels_im[r][c] == 0):
                                start_cluster.append([r, c])
                # perform the distance transform
                labels_im = np.array(labels_im, dtype=np.uint8)
                dist_img = cv2.distanceTransform(labels_im, distanceType=cv2.DIST_L1, maskSize=3).astype(np.float32)
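                # Illustrative example (not from the original code): with the end
                # cluster relabeled to 0, DIST_L1 gives each nonzero cell its
                # city-block distance to the nearest zero cell, e.g.
                # [[1, 1, 1],        [[3, 2, 1],
                #  [1, 1, 0]]  -->    [2, 1, 0]]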
                #print(dist_img[points[0][0]][points[0][1]])
                #get the min distance to the end_cluster from the start_cluster
                min_distance = dist_img[points[0][0]][points[0][1]]
                for cell in start_cluster:
                    if dist_img[cell[0]][cell[1]] < min_distance:
                        min_distance = dist_img[cell[0]][cell[1]]
                #print(min_distance)
                gap_loss = min_distance * soa_cells_inv * gap_weight #* soa_cells_inv # *(1-result[0][0]) does not work either -> backward cannot handle it (packing everything into one line -> the + seems to break everything???)
                #loss = torch.cat((loss, gap_loss), 0)
            else:
                # loss_start += (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
                # loss_start = loss_start.view(1)
                gap_loss = (2 - (result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
                gap_loss = gap_loss.view(1)
                #loss = torch.cat((loss, loss_start), 0)
            # cluster_cells = torch.tensor([0], dtype=torch.float32) #sum of all cluster_cells
            # for cell in start_cluster:
            #     #cluster_cells += result[cell[0]][cell[1]]
            #     cluster_cells = torch.cat((cluster_cells.view(1), result[cell[0]][cell[1]].view(1)), 0)
            # cluster_cells = sum(cluster_cells)
            cluster_size_penalty = torch.tensor([0], dtype=torch.float32, requires_grad=True)
            #cluster_size_penalty = soa_cells * cluster_size_weight * abs(manhattan_distance_start_end - len(start_cluster)) #alternatively: penalty for every 1 outside the clusters, and a penalty depending on len(start_cluster)
            cluster_size_penalty = sum((result * weightmatrix).view(100)) * weight_weight * abs(manhattan_distance_start_end - len(start_cluster))
            #maybe simply make cluster_size_weight depend on the weightmatrix? In any case, lower the weightmatrix at the start/end points, otherwise the weight there becomes much too high
            # weight_loss = sum((result*weightmatrix).view(100)) * weight_weight
            #loss = loss_start + lonelyness_penalty + single_cell_penalty + cluster_size_penalty + gap_penalty + loss
            # print("loss_start: ", loss_start)
            # print("gap_loss: ", gap_loss)
            # print("cluster_size_penalty: ", cluster_size_penalty)
            #concatenate the loss with the other losses from the batch
            loss = torch.cat((loss, loss_start + gap_loss + cluster_size_penalty), 0)
            #loss = loss_start + gap_loss + cluster_size_penalty
            #print(sum(loss))
            #print(loss_start.grad_fn)
            # loss_start.retain_grad()
            # gap_loss.retain_grad()
            # cluster_size_penalty.retain_grad()
            # return gap_loss
        #return the mean of all losses over the batch
        return sum(loss) / result_size[0]
        #return loss_start + cluster_size_penalty + gap_loss #+ weight_loss
        #return sum(loss)
        #return sum(loss), gap_loss, cluster_size_penalty
        #print(result[points[0][0]][points[0][1]])

    def estimate_manhattan_distance(self, start_x, start_y, end_x, end_y):
        return abs(end_x - start_x) + abs(end_y - start_y)
def determine_loss(path, batch_size, points, weightmatrix):
    filepath = "D:\\Studium\\Bachelorarbeit\\Machine Learning\\loss_function\\" + path + ".txt"
    f = open(filepath, "r")
    # Using readlines()
    Lines = f.readlines()
    in_result = 0
    criterion = CustomLoss()
    tmp_array2 = ''
    tmp_array3 = []
    # Strips the newline character
    for line in Lines:
        if (in_result == 1):
            if "]" in line:
                tmp_array = line.split("]")
                tmp_array2 += tmp_array[0]
                in_result = 0
                array = tmp_array2.split(",")
                result = []
                for value in array:
                    if "\n" in value:
                        result.append(float(value.split("\n ")[1]))
                    elif " " in value:
                        if "." in value:
                            result.append(float(value.split(" ")[1].split(".")[0]))
                        else:
                            result.append(float(value.split(" ")[1]))
                    else:
                        # if "." in value:
                        #     result.append(int(value.split(".")[0]))
                        # else:
                        result.append(float(value))
                #print(torch.tensor(result).view(10,10))
                tmp_array2 = ''
                result = torch.tensor(result, dtype=torch.float32)
                loss = criterion(result_given=result.view(batch_size, 1, 10, 10), points_given=points, weightmatrix_given=weightmatrix)
                loss = loss.view(1)
                loss = loss.detach().numpy()
                tmp_array3.append(loss[0])
            else:
                tmp_array2 += line
        # if "min_distance" in line:
        #     #tmp_array = line.split("number_of_pixels: tensor(")
        #     tmp_array = line.split("min_distance: ")
        #     tmp_array2 = tmp_array[1].split(",")
        #     tmp_array2 = tmp_array2[0].split(".")
        #     print(tmp_array2[0])
        if "result:" in line:
            #tmp_array = line.split("number_of_pixels: tensor(")
            tmp_array = line.split("result: tensor([")
            tmp_array2 = tmp_array[1]
            in_result = 1
    tmp_array3 = np.array(tmp_array3)
    return tmp_array3 / tmp_array3.max()
def get_data():
    f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\single_example_data2.txt", "r")
    # Using readlines()
    Lines = f.readlines()
    weightmatrix = []
    points = []
    target_data_return = []
    count = 0
    # Strips the newline character
    for line in Lines:
        #print("{}".format(line.strip()))
        #print(line)
        elements = line.split(";")
        #get the weightmatrix
        x = elements[0].split(", ")
        x.remove("")
        for i in range(0, len(x), 1):
            x[i] = int(x[i])
        #tensor = torch.tensor(x)
        #weightmatrix = torch.stack((weightmatrix, tensor.view(-1, 10)), dim=0)
        weightmatrix.append(x)
        #get the start and endpoints
        y = elements[1].split(" ")
        for i in range(0, len(y), 1):
            y[i] = y[i].strip('[]')
            y[i] = y[i].split(",")
            for j in range(0, len(y[i]), 1):
                y[i][j] = int(y[i][j])
        points.append(torch.tensor(y))
        #get the path and transform it into a matrix
        z = elements[2].split(", ")
        z.remove("")
        for i in range(0, len(z), 1):
            z[i] = z[i].strip('()')
            z[i] = z[i].split(",")
            for j in range(0, len(z[i]), 1):
                z[i][j] = int(z[i][j])
        target_data = [[0] * 10 for _ in range(10)]  # 10x10 zero matrix; torch.zeros([10, 10])
        for i in range(0, len(z), 1):
            target_data[z[i][0]][z[i][1]] = 1
        target_data[y[1][0]][y[1][1]] = 1
        target_data_return.append(target_data)
        count += 1
    f.close()
    return weightmatrix, points, target_data_return, count
weightmatrix, points, target_data, batch_size = get_data() #points = [0,3],[5,7]
weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
weightmatrix = weightmatrix.view(batch_size, 1,10,10)
#first read of the data
array1 = determine_loss(path="complete_statistics_changing_gapweight_clusterweight0,5", batch_size=batch_size, points=points, weightmatrix=weightmatrix)
#second read of the data
array2 = determine_loss(path="complete_statistics_changing_gapweight_clusterweight1,1", batch_size=batch_size, points=points, weightmatrix=weightmatrix)
#third read of the data
array3 = determine_loss(path="complete_statistics_changing_gapweight_clusterweight2", batch_size=batch_size, points=points, weightmatrix=weightmatrix)
#x = np.arange(0, 10, 0.1)
#arr = np.array([1, 2, 3, 4, 5])
x = np.arange(0, 10, 0.1)
x2 = np.arange(0, 5000, 50)
#changing gapweight stats
#plt.plot(x2, array1, label="Loss with Clusterweight 0.5")
#plt.plot(x2, array2, label="Loss with Clusterweight 1.1")
plt.plot(x2, array3, label="Loss with Clusterweight 2")
#changing clusterweight stats
# plt.plot(x, array1, label="Loss with Gapweight 1000")
# plt.plot(x, array2, label="Loss with Gapweight 3000")
# plt.plot(x, array3, label="Loss with Gapweight 5000")
plt.xlabel("Weight of the gap-loss")
# family='serif',
# color='r',
# weight='normal',
# size = 16,
# labelpad = 6)
plt.ylabel("Normalized Loss")
plt.legend(loc="upper left")
plt.grid()
plt.ylim(-0.1, 1.3)
plt.axhline(color='black', lw=0.75)
plt.axvline(color='black', lw=0.75)
plt.savefig("normalized_loss_changing_gapweight_clusterweight2")
plt.show()
import numpy as np
import time
import os
from matplotlib import pyplot as plt
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
import cv2
#from scipy.optimize import curve_fit
def determine_startcluster_size(path):
    filepath = "D:\\Studium\\Bachelorarbeit\\Machine Learning\\loss_function\\" + path + ".txt"
    f = open(filepath, "r")
    # Using readlines()
    Lines = f.readlines()
    in_result = 0
    tmp_array2 = ''
    tmp_array3 = []
    # Strips the newline character
    for line in Lines:
        if (in_result == 1):
            if "]" in line:
                tmp_array = line.split("]")
                tmp_array2 += tmp_array[0]
                in_result = 0
                array = tmp_array2.split(".,")
                result = []
                for value in array:
                    if "\n" in value:
                        result.append(int(value.split("\n ")[1]))
                    elif " " in value:
                        if "." in value:
                            result.append(int(value.split(" ")[1].split(".")[0]))
                        else:
                            result.append(int(value.split(" ")[1]))
                    else:
                        # if "." in value:
                        #     result.append(int(value.split(".")[0]))
                        # else:
                        result.append(int(value))
                #print(torch.tensor(result).view(10,10))
                tmp_array2 = ''
                #find the start_cluster_size
                result = torch.tensor(result, dtype=torch.float32).view(10, 10)
                img = result.round()
                img = img.detach().numpy()
                img = np.array(img, dtype=np.uint8)
                num_labels, labels_im = cv2.connectedComponents(img)
                start_value = labels_im[4][6]
                end_value = labels_im[0][2]
                #remodel the matrix, so that distance_transform can be used
                start_cluster = []
                if (start_value != 0 and end_value != 0):
                    row_nr = len(labels_im)
                    col_nr = len(labels_im[0])
                    for r in range(0, row_nr):
                        for c in range(0, col_nr):
                            if (labels_im[r][c] == end_value):
                                labels_im[r][c] = 0
                            elif (labels_im[r][c] == 0):
                                labels_im[r][c] = end_value
                            elif (labels_im[r][c] == start_value):
                                start_cluster.append([r, c])
                            if (end_value == start_value):
                                if (labels_im[r][c] == 0):
                                    start_cluster.append([r, c])
                #print(len(start_cluster))
                tmp_array3.append(len(start_cluster))
            else:
                tmp_array2 += line
        # if "min_distance" in line:
        #     #tmp_array = line.split("number_of_pixels: tensor(")
        #     tmp_array = line.split("min_distance: ")
        #     tmp_array2 = tmp_array[1].split(",")
        #     tmp_array2 = tmp_array2[0].split(".")
        #     print(tmp_array2[0])
        if "result_rounded:" in line:
            #tmp_array = line.split("number_of_pixels: tensor(")
            tmp_array = line.split("result_rounded: tensor([")
            tmp_array2 = tmp_array[1]
            in_result = 1
    return tmp_array3
#first read of the data
#array1 = determine_startcluster_size("complete_statistics_changing_gapweight_clusterweight0,5")
array1 = determine_startcluster_size("complete_statistics_changing_clusterweight_gapweight1000")
#second read of the data
#array2 = determine_startcluster_size("complete_statistics_changing_gapweight_clusterweight1,1")
array2 = determine_startcluster_size("complete_statistics_changing_clusterweight_gapweight3000")
#third read of the data
#array3 = determine_startcluster_size("complete_statistics_changing_gapweight_clusterweight2")
array3 = determine_startcluster_size("complete_statistics_changing_clusterweight_gapweight5000")
x = np.arange(0, 10, 0.1)
x2 = np.arange(0, 5000, 50)
#changing gapweight stats
#plt.plot(x2, array1, label="Size of the cluster around the startpoint with Clusterweight 0.5")
#plt.plot(x2, array2, label="Size of the cluster around the startpoint with Clusterweight 1.1")
#plt.plot(x2, array3, label="Size of the cluster around the startpoint with Clusterweight 2")
#changing clusterweight stats
plt.plot(x, array1, label="Size of the cluster around the startpoint with Gapweight 1000")
plt.plot(x, array2, label="Size of the cluster around the startpoint with Gapweight 3000")
plt.plot(x, array3, label="Size of the cluster around the startpoint with Gapweight 5000")
plt.xlabel("Weight of the clustersize-loss")
# family='serif',
# color='r',
# weight='normal',
# size = 16,
# labelpad = 6)
plt.ylabel("Clustersize(Number of Pixels)")
plt.legend(loc="upper left")
plt.grid()
plt.ylim(-1, 50)
plt.axhline(color='black', lw=0.75)
plt.axvline(color='black', lw=0.75)
plt.savefig("startcluster_size_changing_clusterweight")
plt.show()
import torch
if __name__ == "__main__":
    f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\data.txt", "r")
    # Using readlines()
    Lines = f.readlines()
    # Strips the newline character
    for line in Lines:
        #print("{}".format(line.strip()))
        #print(line)
        elements = line.split(";")
        #get the weightmatrix
        x = elements[0].split(", ")
        x.remove("")
        for i in range(0, len(x), 1):
            x[i] = int(x[i])
        tensor = torch.tensor(x)
        weightmatrix = tensor.view(-1, 10)
        #get the start and endpoints
        y = elements[1].split(" ")
        for i in range(0, len(y), 1):
            y[i] = y[i].strip('[]')
            y[i] = y[i].split(",")
            for j in range(0, len(y[i]), 1):
                y[i][j] = int(y[i][j])
        points = torch.tensor(y)
        #get the path and transform it into a matrix
        z = elements[2].split(", ")
        z.remove("")
        print(z)
        for i in range(0, len(z), 1):
            z[i] = z[i].strip('()')
            z[i] = z[i].split(",")
            for j in range(0, len(z[i]), 1):
                z[i][j] = int(z[i][j])
        target_data = torch.zeros([10, 10])
        for i in range(0, len(z), 1):
            target_data[z[i][0]][z[i][1]] = 1
        target_data[y[1][0]][y[1][1]] = 1
        print(target_data)
    # Remember that you must call model.eval() to set dropout and batch normalization layers to evaluation mode before running inference. Failing to do this will yield inconsistent inference results
    # Generic PyTorch loading pattern from the docs; TheModelClass, args, kwargs and PATH are placeholders, so it is left commented out:
    #model = TheModelClass(*args, **kwargs)
    #model.load_state_dict(torch.load(PATH))
    #model.eval()
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
       [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
       [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
       [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
       [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
       [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
       [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]
imgplot = plt.matshow(img, cmap=plt.cm.gray)
plt.show()
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
def crop_tensor(x, cropping_size):
    # crops cropping_size rows/columns from each side (returns None if the crop would be too large)
    if ((x.size()[2] / 2) >= cropping_size):
        return x[0:, 0:, cropping_size:-cropping_size, cropping_size:-cropping_size]

if __name__ == '__main__':
    list = [[[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]]]
    tensor = torch.tensor(list)
    tensor = crop_tensor(tensor, 1)
    print(tensor)
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
class U_Net(nn.Module):
    def __init__(self):
        super(U_Net, self).__init__()
        #the way down the u-form:
        #first convolution-block
        self.down1 = nn.Sequential(
            nn.Conv2d(2, 64, 3),
            nn.Conv2d(64, 64, 3),
            #F.relu()
            #F.max_pool2d(kernel_size=2)
        )
        # self.conv1 = nn.Conv2d(2, 64, 3)
        # self.conv1_1 = nn.Conv2d(64, 64, 3)
        #max pool2d in forward, also save the state in the forward function
        #second convolution-block
        self.down2 = nn.Sequential(
            nn.Conv2d(64, 128, 3),
            nn.Conv2d(128, 128, 3),
            #F.relu()
            #F.max_pool2d(kernel_size=2)
        )
        # self.conv2 = nn.Conv2d(64, 128, 3)
        # self.conv2_1 = nn.Conv2d(128, 128, 3)
        #max pool2d in forward, also save the state in the forward function
        #third convolution-block
        self.down3 = nn.Sequential(
            nn.Conv2d(128, 256, 3),
            nn.Conv2d(256, 256, 3),
            #F.relu()
            #F.max_pool2d(kernel_size=2)
        )
        # self.conv3 = nn.Conv2d(128, 256, 3)
        # self.conv3_1 = nn.Conv2d(256, 256, 3)
        #max pool2d in forward, also save the state in the forward function
        #fourth convolution-block
        self.down4 = nn.Sequential(
            nn.Conv2d(256, 512, 3),
            nn.Conv2d(512, 512, 3),
            #F.relu()
            #F.max_pool2d(kernel_size=2)
        )
        # self.conv4 = nn.Conv2d(256, 512, 3)
        # self.conv4_1 = nn.Conv2d(512, 512, 3)
        #max pool2d in forward, also save the state in the forward function
        #fifth convolution-block and the bottom of the u-form
        self.bottom = nn.Sequential(
            nn.Conv2d(512, 1024, 3),
            nn.Conv2d(1024, 1024, 3),
            #F.relu()
        )
        # self.conv5 = nn.Conv2d(512, 1024, 3)
        # self.conv5_1 = nn.Conv2d(1024, 1024, 3)
        #the way up the u-form, with the help of upconvolution. Use "ConvTranspose2d" and not(!) "Upsample". Upsample doesn't learn, ConvTranspose2d learns parameters
        self.upconv1 = nn.ConvTranspose2d(1024, 512, 2, stride=2)
        self.up1 = nn.Sequential(
            nn.Conv2d(1024, 512, 3),
            nn.Conv2d(512, 512, 3),
            #F.relu()
        )
        self.upconv2 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.up2 = nn.Sequential(
            # nn.ConvTranspose2d(512, 256, 2, stride=2),
            nn.Conv2d(512, 256, 3),
            nn.Conv2d(256, 256, 3),
            #F.relu()
        )
        self.upconv3 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.up3 = nn.Sequential(
            # nn.ConvTranspose2d(256, 128, 2, stride=2),
            nn.Conv2d(256, 128, 3),
            nn.Conv2d(128, 128, 3),
            #F.relu()
        )
        self.upconv4 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.up4 = nn.Sequential(
            # nn.ConvTranspose2d(128, 64, 2, stride=2),
            nn.Conv2d(128, 64, 3),
            nn.Conv2d(64, 64, 3),
            #F.relu()
        )
        self.final_conv = nn.Conv2d(64, 1, kernel_size=1)

    # x represents our data
    def forward(self, x):
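        # Spatial sizes for the 1 x 2 x 572 x 572 input used in the main below
        # (each unpadded 3x3 conv shrinks the map by 2, each pool halves it,
        # each up-conv doubles it):
        # down: 572 -> 568 -> 284 -> 280 -> 140 -> 136 -> 68 -> 64 -> 32
        # bottom: 32 -> 28
        # up: 28 -> 56 -> 52 -> 104 -> 100 -> 200 -> 196 -> 392 -> 388
        # the crop sizes 4/16/40/88 shrink the skip connections to match.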
        #down the u
        d_1 = self.down1(x)
        d_1_tmp = d_1
        d_1 = F.max_pool2d(d_1, kernel_size=2)
        d_1 = F.relu(d_1)
        d_2 = self.down2(d_1)
        d_2_tmp = d_2
        d_2 = F.max_pool2d(d_2, kernel_size=2)
        d_2 = F.relu(d_2)
        d_3 = self.down3(d_2)
        d_3_tmp = d_3
        d_3 = F.max_pool2d(d_3, kernel_size=2)
        d_3 = F.relu(d_3)
        d_4 = self.down4(d_3)
        d_4_tmp = d_4
        d_4 = F.max_pool2d(d_4, kernel_size=2)
        d_4 = F.relu(d_4)
        #bottom
        return_value = self.bottom(d_4)
        return_value = F.relu(return_value)
        #up the u
        return_value = self.upconv1(return_value)
        return_value = self.up1(self.concatenate_tensors(return_value, self.crop_tensor(d_4_tmp, 4)))
        return_value = F.relu(return_value)
        return_value = self.upconv2(return_value)
        return_value = self.up2(self.concatenate_tensors(return_value, self.crop_tensor(d_3_tmp, 16)))
        return_value = F.relu(return_value)
        return_value = self.upconv3(return_value)
        return_value = self.up3(self.concatenate_tensors(return_value, self.crop_tensor(d_2_tmp, 40)))
        return_value = F.relu(return_value)
        return_value = self.upconv4(return_value)
        return_value = self.up4(self.concatenate_tensors(return_value, self.crop_tensor(d_1_tmp, 88)))
        return_value = self.final_conv(return_value)
        return return_value

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

    def crop_tensor(self, x, cropping_size):
        if ((x.size()[2] / 2) >= cropping_size):
            return x[0:, 0:, cropping_size:-cropping_size, cropping_size:-cropping_size]

    def concatenate_tensors(self, x, y):
        return torch.cat((x, y), 1)  #dimension 1 is the channel-dimension
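    # e.g. concatenating two (1, 512, 56, 56) tensors along dimension 1 gives
    # (1, 1024, 56, 56), which is exactly the input self.up1 expects.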
def init():
    # torch.cuda.is_available() checks and returns a Boolean True if a GPU is available, else it'll return False
    is_cuda = torch.cuda.is_available()
    # If we have a GPU available, we'll set our device to GPU. We'll use this device variable later in our code.
    if is_cuda:
        device = torch.device("cuda")
        print("GPU is available")
    else:
        device = torch.device("cpu")
        print("GPU not available, CPU used")
    return device
if __name__ == '__main__':
    device = init()
    weightmatrix = torch.rand(1, 2, 572, 572)
    target_data = torch.rand(1, 1, 388, 388)
    learning_rate = 0.01
    model_path = 'D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models'
    nn_pathfinder = U_Net()
    opt = optim.SGD(params=nn_pathfinder.parameters(), lr=learning_rate)
    nn_pathfinder.to(device)
    # the inputs have to live on the same device as the model
    weightmatrix = weightmatrix.to(device)
    target_data = target_data.to(device)
    result = nn_pathfinder(weightmatrix)
    criterion = nn.MSELoss()
    #print ("Result: ", result)
    print("before training")
    # print ("Result: ", result.flatten())
    # print ("Target Data: ", target_data.flatten())
    print("loss: ", criterion(result, target_data))
    for epoch in tqdm(range(10)):
        result = nn_pathfinder(weightmatrix)
        loss = criterion(result, target_data)
        #if(epoch % 10 == 0):
        #    print("loss", loss)
        nn_pathfinder.zero_grad()
        loss.backward()
        opt.step()
    print("after")
    # print ("Result: ", result.flatten())
    # #result = result.type(torch.int8)
    # result = torch.round(result)
    # result = result.type(torch.int8)
    # target_data = target_data.type(torch.int8)
    # print ("Result: ", result.view(10,10))
    # print ("Target Data: ", target_data.view(10,10))
    print("loss: ", loss)
    # save the model
    torch.save(nn_pathfinder.state_dict(),
               model_path + "\\128x128_u-net_nr_" + "1" + ".pth")
# torch.cuda.is_available() checks and returns a Boolean True if a GPU is available, else it'll return False
is_cuda = torch.cuda.is_available()
# If we have a GPU available, we'll set our device to GPU. We'll use this device variable later in our code.
if is_cuda:
    device = torch.device("cuda")
    print("GPU is available")
else:
    device = torch.device("cpu")
    print("GPU not available, CPU used")
#model is your own, defined NN (left commented out, since no model is defined in this snippet)
#model.to(device)
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
f = open("D:\\Studium\\Bachelorarbeit\\Machine Learning\\loss_function\\complete_statistics_changing_clusterweight_gapweight5000.txt", "r")
# Using readlines()
Lines = f.readlines()
in_result = 0
tmp_array2 = ''
# Strips the newline character
for line in Lines:
if(in_result == 1):
if "]" in line:
tmp_array = line.split("]")
tmp_array2 += tmp_array[0]
in_result = 0
array = tmp_array2.split(".,")
result = []
for value in array:
if "\n" in value:
result.append(int(value.split("\n ")[1]))
elif " " in value:
if "." in value:
result.append(int(value.split(" ")[1].split(".")[0]))
else:
result.append(int(value.split(" ")[1]))
else:
# if "." in value:
# result.append(int(value.split(".")[0]))
# else:
result.append(int(value))
#print(torch.tensor(result).view(10,10))
tmp_array2 = ''
#find the start_cluster_size
result = torch.tensor(result, dtype=torch.float32).view(10,10)
img = result.round()
img = img.detach().numpy()
img = np.array(img, dtype=np.uint8)
num_labels, labels_im = cv2.connectedComponents(img)
start_value = labels_im[4][6]
end_value = labels_im[0][2]
#remodel the matrix, so that distance_transform can be used
start_cluster = []
if(start_value != 0 and end_value != 0):
column_nr = len(labels_im)
row_nr = len(labels_im[0])
for i in range(0, column_nr):
for j in range(0, row_nr):
if (labels_im[i][j] == end_value):
labels_im[i][j] = 0
elif (labels_im[i][j] == 0):
labels_im[i][j] = end_value
elif (labels_im[i][j] == start_value):
start_cluster.append([i,j])
if(end_value == start_value):
if(labels_im[i][j] == 0):
start_cluster.append([i,j])
print(len(start_cluster))
else:
tmp_array2 += line
# if "min_distance" in line:
# #tmp_array = line.split("number_of_pixels: tensor(")
# tmp_array = line.split("min_distance: ")
# tmp_array2 = tmp_array[1].split(",")
# tmp_array2 = tmp_array2[0].split(".")
# print(tmp_array2[0])
if "result_rounded:" in line:
#tmp_array = line.split("number_of_pixels: tensor(")
tmp_array = line.split("result_rounded: tensor([")
tmp_array2 = tmp_array[1]
in_result = 1
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(100, 250)
        self.fc2 = nn.Linear(250, 512)
        self.fc3 = nn.Linear(512, 250)
        self.fc4 = nn.Linear(250, 100)

    # x represents our data
    def forward(self, x):
        # Pass data through fc1
        x = self.fc1(x)
        x = F.relu(x)
        #x = self.dropout2(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = F.relu(x)
        x = self.fc4(x)
        x = torch.sigmoid(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
class CustomLoss(nn.Module):
def init(self):
super(CustomLoss,self).__init__()
def forward(self, result_given, points_given):
#variables for easier variation of the loss function
weight = 1000
lonelyness_weight = 15
cluster_weight = 5 #should be a smaller weight than lonelyness_weight (denn die Strafe dafür in einem Cluster nicht eins zu sein sollte kleiner sein, als die Strafe dafür in gar keinem Cluster einen Wert >0 zu haben)
cell_weight = 20 #must be greater than lonelyness_weight (dadurch wird es weniger dafür bestraft eine eins zu sein, als keine eins in dem rechteck zwischen den Start/End Punkten zu sein)
cluster_size_weight = 12 # muss kleiner als lonelyness_weight sein!!! Wirkt invers zu cluster_weight. Very sensible. Idea: Could be a higher penalty depending on the distance between start and endpoint
gap_weight = 300
result_size = result_given.size()
loss = torch.tensor([0], dtype=torch.float32)
for i in range(0, result_size[0]):
result = result_given[i, 0, 0:, 0:]
points = points_given[i]
if i%100 == 0:
print(i)
manhattan_distance_start_end_y = points[1][0] - points[0][0]
manhattan_distance_start_end_x = points[1][1] - points[0][1]
manhattan_distance_start_end = self.estimate_manhattan_distance(points[0][0], points[0][1], points[1][0], points[1][1])
#set the start and endpoint to 1
loss_start = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
#first compute the clusters
start_cluster = [[points[0][0], points[0][1]]] #here: first y-value, 2nd x-value
for cell in start_cluster:
#add neighborcells into the start_cluster
#left side of the current cell
if cell[1] != 0:
if result[cell[0]][cell[1]-1].round() == 1:
if [cell[0], cell[1]-1] not in start_cluster:
start_cluster.append([cell[0], cell[1]-1])
if cell[0] != 0:
if result[cell[0] - 1][cell[1] - 1].round() == 1:
if [cell[0] - 1, cell[1] - 1] not in start_cluster:
start_cluster.append([cell[0] - 1, cell[1] -1])
if cell[0] != result.size()[0] - 1:
if result[cell[0] + 1][cell[1] - 1].round() == 1:
if [cell[0] + 1, cell[1] - 1] not in start_cluster:
start_cluster.append([cell[0] + 1, cell[1] -1])
#right side of the current cell
if cell[1] != result.size()[1] - 1:
if result[cell[0]][cell[1] + 1].round() == 1:
if [cell[0], cell[1] + 1] not in start_cluster:
start_cluster.append([cell[0], cell[1] + 1])
if cell[0] != 0:
if result[cell[0] - 1][cell[1] + 1].round() == 1:
if [cell[0] - 1, cell[1] + 1] not in start_cluster:
start_cluster.append([cell[0] - 1, cell[1] + 1])
if cell[0] != result.size()[0] - 1:
if result[cell[0] + 1][cell[1] + 1].round() == 1:
if [cell[0] + 1, cell[1] + 1] not in start_cluster:
start_cluster.append([cell[0] + 1, cell[1] + 1])
#upper side of the cell
if cell[0] != 0:
if result[cell[0] - 1][cell[1]].round() == 1:
if [cell[0] - 1, cell[1]] not in start_cluster:
start_cluster.append([cell[0] - 1, cell[1]])
#down side of the cell
if cell[0] != result.size()[0] - 1:
if result[cell[0] + 1][cell[1]].round() == 1:
if [cell[0] + 1, cell[1]] not in start_cluster:
start_cluster.append([cell[0] + 1, cell[1]])
#the start cluster is now created
#use the penalty to form a square between start and endpoint as first approximation of the path. The points outside increase the loss if they are greater 1. This loss increases further away from the path/square.
single_cell_penalty = torch.tensor([0], dtype=torch.float32)
row_counter = -1
column_counter = -1
for row in result:
row_counter += 1
for column in row:
column_counter += 1
m_distance_to_start_and_end = self.estimate_manhattan_distance(row_counter, column_counter, points[0][0], points[0][1]) + self.estimate_manhattan_distance(row_counter, column_counter, points[1][0], points[1][1])
#manhattan distance to both points summed is always greater or equal than the manhattan distance between these both points. It's equal in a square between them, its greater if a point is not in this square
if (m_distance_to_start_and_end - manhattan_distance_start_end == 0):
single_cell_penalty += (1-column) * cell_weight #* (column + ((self.estimate_manhattan_distance(column_counter, row_counter, points[0][0], points[0][1]) + self.estimate_manhattan_distance(column_counter, row_counter, points[1][0], points[1][1])) - manhattan_distance_start_end - 1))
else:
single_cell_penalty += column * ((m_distance_to_start_and_end - manhattan_distance_start_end)/2)
column_counter = -1
#estimate the single_cell_penalty -> penalty for being a 1 without being connected to (element of) the start_cluster.
#penalty for numbers outside the clusters
lonelyness_penalty = torch.tensor([0], dtype=torch.float32)
cluster_size_penalty = torch.tensor([0], dtype=torch.float32)
row_counter = -1
column_counter = -1
for row in result:
row_counter += 1
for column in row:
column_counter += 1
if not ([column_counter, row_counter] in start_cluster):
lonelyness_penalty += column * lonelyness_weight
else:
#penalty, so that the entries in the clusters move to 1
lonelyness_penalty += (1-column) * cluster_weight
#penalty, so that there are not too many entries in the cluster
cluster_size_penalty += column * len(start_cluster) * cluster_size_weight
if(column_counter == points[0][0] and row_counter == points[0][1]):
pass
column_counter = -1
#create a sum of all cell_values to use it as a possible penalty
soa_cells = torch.tensor([0], dtype=torch.float32) #sum of all cells
soa_cells_inv = torch.tensor([0], dtype=torch.float32) #sum of all cells inverted
for row in result:
for column in row:
soa_cells += column
soa_cells_inv += (1-column)
#Multipliziere diese mit der strafe für die Gap. Berechne diese Strafe mit einem gap_weight und der Entfernung des nächsten elements des start_clusters zum Ziel.
gap_size = manhattan_distance_start_end
nearest_cell = [points[0][0], points[0][1]]
for cell in start_cluster:
tmp_gap_size = self.estimate_manhattan_distance(cell[0], cell[1], points[1][0], points[1][1])
if tmp_gap_size < gap_size:
gap_size = tmp_gap_size
nearest_cell = [cell[0], cell[1]]
#search for the next cell in the gap. it is a neighbour of the "nearest_cell". You can use its value as a (inverted) penalty to set it to 1. First find the offset to the goal
offset_y = points[1][0] - nearest_cell[0]
offset_x = points[1][1] - nearest_cell[1]
#if the y-offset is positive, the y-value of the next cell must be higher than the y-value of "nearest_cell" (same for x-value)
next_cell = nearest_cell
gap_next_cell = gap_size
#left side of the nearest cell
if offset_x < 0:
if (self.estimate_manhattan_distance(nearest_cell[0], nearest_cell[1]-1, points[1][0], points[1][1]) < gap_next_cell):
next_cell = [nearest_cell[0], nearest_cell[1]-1]
gap_next_cell = self.estimate_manhattan_distance(nearest_cell[0], nearest_cell[1]-1, points[1][0], points[1][1])
if nearest_cell[0] != 0:
if (self.estimate_manhattan_distance(nearest_cell[0]-1, nearest_cell[1]-1, points[1][0], points[1][1]) < gap_next_cell):
next_cell = [nearest_cell[0]-1, nearest_cell[1]-1]
gap_next_cell = self.estimate_manhattan_distance(nearest_cell[0]-1, nearest_cell[1]-1, points[1][0], points[1][1])
if cell[0] != result.size()[0] - 1:
if (self.estimate_manhattan_distance(nearest_cell[0]+1, nearest_cell[1]-1, points[1][0], points[1][1]) < gap_next_cell):
next_cell = [nearest_cell[0]+1, nearest_cell[1]-1]
gap_next_cell = self.estimate_manhattan_distance(nearest_cell[0]+1, nearest_cell[1]-1, points[1][0], points[1][1])
#right side of the nearest cell
if offset_x > 0:
if (self.estimate_manhattan_distance(nearest_cell[0], nearest_cell[1]+1, points[1][0], points[1][1]) < gap_next_cell):
next_cell = [nearest_cell[0], nearest_cell[1]+1]
gap_next_cell = self.estimate_manhattan_distance(nearest_cell[0], nearest_cell[1]+1, points[1][0], points[1][1])
if nearest_cell[0] != 0:
if (self.estimate_manhattan_distance(nearest_cell[0]-1, nearest_cell[1]+1, points[1][0], points[1][1]) < gap_next_cell):
next_cell = [nearest_cell[0]-1, nearest_cell[1]+1]
gap_next_cell = self.estimate_manhattan_distance(nearest_cell[0]-1, nearest_cell[1]+1, points[1][0], points[1][1])
if cell[0] != result.size()[0] - 1:
if (self.estimate_manhattan_distance(nearest_cell[0]+1, nearest_cell[1]+1, points[1][0], points[1][1]) < gap_next_cell):
next_cell = [nearest_cell[0]+1, nearest_cell[1]+1]
gap_next_cell = self.estimate_manhattan_distance(nearest_cell[0]+1, nearest_cell[1]+1, points[1][0], points[1][1])
#upper side of the cell
if offset_y < 0:
if (self.estimate_manhattan_distance(nearest_cell[0]-1, nearest_cell[1], points[1][0], points[1][1]) < gap_next_cell):
next_cell = [nearest_cell[0]-1, nearest_cell[1]]
gap_next_cell = self.estimate_manhattan_distance(nearest_cell[0]-1, nearest_cell[1], points[1][0], points[1][1])
#lower side of the cell
if offset_y > 0:
if (self.estimate_manhattan_distance(nearest_cell[0]+1, nearest_cell[1], points[1][0], points[1][1]) < gap_next_cell):
next_cell = [nearest_cell[0]+1, nearest_cell[1]]
gap_next_cell = self.estimate_manhattan_distance(nearest_cell[0]+1, nearest_cell[1], points[1][0], points[1][1])
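#A compact alternative to the direction-by-direction search above (a sketch, not the code used here): scan all in-bounds neighbours of nearest_cell and keep the one with the smallest Manhattan distance to the goal.
# best, best_d = nearest_cell, gap_next_cell
# for dy in (-1, 0, 1):
#     for dx in (-1, 0, 1):
#         y, x = nearest_cell[0] + dy, nearest_cell[1] + dx
#         if (dy, dx) != (0, 0) and 0 <= y < result.size()[0] and 0 <= x < result.size()[1]:
#             d = self.estimate_manhattan_distance(y, x, points[1][0], points[1][1])
#             if d < best_d:
#                 best, best_d = [y, x], d
# next_cell, gap_next_cell = best, best_d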
gap_penalty = gap_size * gap_weight * (1-result[next_cell[0]][next_cell[1]])
#TODO idea: a penalty for proximity to the two non-start/end corners of the generated rectangle -> should make the path thinner and push it more towards the middle.
loss = loss_start + lonelyness_penalty + single_cell_penalty + cluster_size_penalty + gap_penalty + loss
return loss
def estimate_manhattan_distance(self, start_x, start_y, end_x, end_y):
return abs(end_x - start_x) + abs(end_y - start_y)
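#Worked example: estimate_manhattan_distance(3, 0, 7, 4) = |7-3| + |4-0| = 8, i.e. the number of axis-aligned grid steps between (3,0) and (7,4).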
def get_data():
f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\single_example_data.txt", "r")
# Using readlines()
Lines = f.readlines()
weightmatrix = []
points = []
target_data_return = []
count = 0
# one training example per line
for line in Lines:
elements = line.split(";")
#get the weightmatrix
x = elements[0].split(", ")
x.remove("")
for i in range(0, len(x), 1):
x[i] = int(x[i])
weightmatrix.append(x)
#get the start and endpoints
y = elements[1].split(" ")
for i in range(0, len(y), 1):
y[i] = y[i].strip('[]')
y[i] = y[i].split(",")
for j in range(0, len(y[i]), 1):
y[i][j] = int(y[i][j])
points.append(torch.tensor(y))
#get the path and transform it into a matrix
z = elements[2].split(", ")
z.remove("")
for i in range(0, len(z), 1):
z[i] = z[i].strip('()')
z[i] = z[i].split(",")
for j in range(0, len(z[i]), 1):
z[i][j] = int(z[i][j])
target_data = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] #torch.zeros([10, 10])
for i in range(0, len(z), 1):
target_data[z[i][0]][z[i][1]] = 1
target_data[y[1][0]][y[1][1]] = 1
target_data_return.append(target_data)
count += 1
f.close()
return weightmatrix, points, target_data_return, count
weightmatrix, points, target_data, batch_size = get_data()
weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
weightmatrix = weightmatrix.view(batch_size, 1,10,10)
random_data = autograd.Variable(torch.rand(1, 1, 10, 10))
model_path = 'D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models'
criterion = CustomLoss()
my_nn = Net()
my_nn.load_state_dict(torch.load("D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models\\own_loss_model_nr_1.pth"))
my_nn.eval()
opt = optim.SGD(params=my_nn.parameters(), lr=0.1)
result = my_nn(weightmatrix.view(batch_size,1, 1,100))
for epoch in tqdm(range(15)):
result = my_nn(weightmatrix.view(batch_size,1,1, 100))
loss = criterion(result_given=result.view(batch_size,1,10,10), points_given=points)
print("loss" , loss)
my_nn.zero_grad()
loss.backward()
opt.step()
torch.save(my_nn.state_dict(),
model_path + "\\own_loss_model_nr_" + "1" + ".pth")
print(result.view(10,10))
print(torch.round(result).view(10,10))
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(100, 250)
self.fc2 = nn.Linear(250, 512)
self.fc3 = nn.Linear(512, 250)
self.fc4 = nn.Linear(250, 100)
# x represents our data
def forward(self, x):
# Pass data through fc1
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
x = self.fc4(x)
x = torch.sigmoid(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
class CustomLoss(nn.Module):
def __init__(self):
super(CustomLoss,self).__init__()
def forward(self, result_given, points_given):
#variables for easier variation of the loss function
weight = 100
gap_weight = 10
cluster_size_weight = 90
result_size = result_given.size()
loss = torch.tensor([0], dtype=torch.float32)
for i in range(0, result_size[0]):
result = result_given[i, 0, 0:, 0:]
points = points_given[i]
manhattan_distance_start_end = self.estimate_manhattan_distance(points[0][0], points[0][1], points[1][0], points[1][1])
soa_cells = torch.tensor([0], dtype=torch.float32) #sum of all cells
soa_cells_inv = torch.tensor([0], dtype=torch.float32) #sum of all cells inverted
for row in result:
for column in row:
soa_cells += column
soa_cells_inv += (1-column)
#set the start and endpoint to 1
loss_start = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
loss_start = loss_start.view(1)
#first compute the clusters
img = result.round()
img = img.detach().numpy()
img = np.array(img, dtype=np.uint8)
num_labels, labels_im = cv2.connectedComponents(img)
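#cv2.connectedComponents labels background (zero) pixels 0 and gives every 8-connected foreground blob a label in 1..num_labels-1, so the two lookups below identify the clusters containing the start and end point.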
start_value = labels_im[points[0][0]][points[0][1]]
end_value = labels_im[points[1][0]][points[1][1]]
#remodel the matrix, so that distance_transform can be used
start_cluster = []
gap_loss = torch.tensor([0], dtype=torch.float32)
if(start_value != 0 and end_value != 0):
column_nr = len(labels_im)
row_nr = len(labels_im[0])
for i in range(0, column_nr):
for j in range(0, row_nr):
if (labels_im[i][j] == end_value):
labels_im[i][j] = 0
elif (labels_im[i][j] == 0):
labels_im[i][j] = end_value
elif (labels_im[i][j] == start_value):
start_cluster.append([i,j])
if(end_value == start_value):
if(labels_im[i][j] == 0):
start_cluster.append([i,j])
# perform the distance transform
labels_im = np.array(labels_im, dtype=np.uint8)
dist_img = cv2.distanceTransform(labels_im, distanceType=cv2.DIST_L1, maskSize=3).astype(np.float32)
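#cv2.distanceTransform assigns every non-zero pixel its DIST_L1 (Manhattan) distance to the nearest zero pixel. After the relabelling above, the zero pixels are exactly the end cluster, so dist_img holds each cell's distance to the end cluster.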
#get the min distance to the end_cluster from the start_cluster
min_distance = dist_img[points[0][0]][points[0][1]]
for cell in start_cluster:
if dist_img[cell[0]][cell[1]] < min_distance:
min_distance = dist_img[cell[0]][cell[1]]
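#note: soa_cells_inv enters the product below twice, so this version of the gap loss effectively squares the inverted cell sum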
gap_loss = min_distance * soa_cells_inv * gap_weight * soa_cells_inv
else:
gap_loss = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
gap_loss = gap_loss.view(1)
loss = torch.cat((loss, loss_start), 0)
cluster_cells = torch.tensor([0], dtype=torch.float32) #sum of all cluster_cells
for cell in start_cluster:
cluster_cells += result[cell[0]][cell[1]]
if(len(start_cluster) != 0):
cluster_size_penalty = cluster_cells * cluster_size_weight
else:
cluster_size_penalty = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
cluster_size_penalty = cluster_size_penalty.view(1)
return sum(loss), gap_loss, cluster_size_penalty
def estimate_manhattan_distance(self, start_x, start_y, end_x, end_y):
return abs(end_x - start_x) + abs(end_y - start_y)
def get_data():
f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\single_example_data.txt", "r")
# Using readlines()
Lines = f.readlines()
weightmatrix = []
points = []
target_data_return = []
count = 0
for line in Lines:
elements = line.split(";")
#get the weightmatrix
x = elements[0].split(", ")
x.remove("")
for i in range(0, len(x), 1):
x[i] = int(x[i])
weightmatrix.append(x)
#get the start and endpoints
y = elements[1].split(" ")
for i in range(0, len(y), 1):
y[i] = y[i].strip('[]')
y[i] = y[i].split(",")
for j in range(0, len(y[i]), 1):
y[i][j] = int(y[i][j])
points.append(torch.tensor(y))
#get the path and transform it into a matrix
z = elements[2].split(", ")
z.remove("")
for i in range(0, len(z), 1):
z[i] = z[i].strip('()')
z[i] = z[i].split(",")
for j in range(0, len(z[i]), 1):
z[i][j] = int(z[i][j])
target_data = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] #torch.zeros([10, 10])
for i in range(0, len(z), 1):
target_data[z[i][0]][z[i][1]] = 1
target_data[y[1][0]][y[1][1]] = 1
target_data_return.append(target_data)
count += 1
f.close()
return weightmatrix, points, target_data_return, count
weightmatrix, points, target_data, batch_size = get_data()
weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
weightmatrix = weightmatrix.view(batch_size, 1,10,10)
#mark start and end in the weightmatrix
for i in range(0, batch_size):
weightmatrix[i, 0, points[i][0][0], points[i][0][1]] += 1000
weightmatrix[i, 0, points[i][1][0], points[i][1][1]] += 1000
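#NOTE: the points loaded from the file are replaced here with a fixed start/end pair, (3,0) -> (7,4), for this single-example run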
points = [[[3,0],
[7,4]]]
random_data = autograd.Variable(torch.rand(1, 1, 10, 10))
model_path = 'D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models'
criterion = CustomLoss()
my_nn = Net()
opt = optim.SGD(params=my_nn.parameters(), lr=0.1)
result = my_nn(random_data.view(batch_size,1, 1,100))
print(result.view(10,10))
for epoch in tqdm(range(75)):
result = my_nn(random_data.view(batch_size,1,1, 100))
start_loss, gap_loss, cluster_size_loss = criterion(result_given=result.view(batch_size,1,10,10), points_given=points)
my_nn.zero_grad()
start_loss.backward(retain_graph=True)
gap_loss.backward(retain_graph=True)
cluster_size_loss.backward()
opt.step()
# torch.save(my_nn.state_dict(),
# model_path + "\\own_loss_model_nr_" + "2" + ".pth")
print("loss" , start_loss + gap_loss + cluster_size_loss)
print(result.view(10,10))
#print(torch.round(result).view(10,10))
img = result.round().view(10,10)
imgplot = plt.matshow(img.detach().numpy())
plt.show()
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(100, 250)
self.fc2 = nn.Linear(250, 512)
self.fc3 = nn.Linear(512, 250)
self.fc4 = nn.Linear(250, 100)
# x represents our data
def forward(self, x):
# Pass data through fc1
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
x = self.fc4(x)
x = torch.sigmoid(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
class CustomLoss(nn.Module):
def __init__(self):
super(CustomLoss,self).__init__()
def forward(self, result_given, points_given):
#variables for easier variation of the loss function
weight = 10000
gap_weight = 40 #35
cluster_size_weight = 3
result_size = result_given.size()
for i in range(0, result_size[0]):
result = result_given[i, 0, 0:, 0:]
points = points_given[i]
manhattan_distance_start_end = self.estimate_manhattan_distance(points[0][0], points[0][1], points[1][0], points[1][1])
soa_cells = sum(sum(result))
soa_cells_inv = 100 - sum(sum(result))
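#(100 is the number of cells in the 10x10 grid, so this equals the sum of the inverted cell values)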
#set the start and endpoint to 1
loss_start = torch.tensor([0], dtype=torch.float32, requires_grad = True)
if(result[points[0][0]][points[0][1]].round() == 0 or result[points[1][0]][points[1][1]].round() == 0):
loss_start = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
loss_start = loss_start.view(1)
#first compute the clusters
img = result.round()
img = img.detach().numpy()
img = np.array(img, dtype=np.uint8)
num_labels, labels_im = cv2.connectedComponents(img)
start_value = labels_im[points[0][0]][points[0][1]]
end_value = labels_im[points[1][0]][points[1][1]]
#remodel the matrix, so that distance_transform can be used
start_cluster = []
gap_loss = torch.tensor([0], dtype=torch.float32)
if(start_value != 0 and end_value != 0):
column_nr = len(labels_im)
row_nr = len(labels_im[0])
for i in range(0, column_nr):
for j in range(0, row_nr):
if (labels_im[i][j] == end_value):
labels_im[i][j] = 0
elif (labels_im[i][j] == 0):
labels_im[i][j] = end_value
elif (labels_im[i][j] == start_value):
start_cluster.append([i,j])
if(end_value == start_value):
if(labels_im[i][j] == 0):
start_cluster.append([i,j])
# perform the distance transform
labels_im = np.array(labels_im, dtype=np.uint8)
dist_img = cv2.distanceTransform(labels_im, distanceType=cv2.DIST_L1, maskSize=3).astype(np.float32)
#get the min distance to the end_cluster from the start_cluster
min_distance = dist_img[points[0][0]][points[0][1]]
for cell in start_cluster:
if dist_img[cell[0]][cell[1]] < min_distance:
min_distance = dist_img[cell[0]][cell[1]]
gap_loss = min_distance * soa_cells_inv * gap_weight
else:
gap_loss = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
gap_loss = gap_loss.view(1)
cluster_size_penalty = torch.tensor([0], dtype=torch.float32, requires_grad = True)
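#the penalty below vanishes when the start cluster holds exactly as many cells as the Manhattan distance between start and end, i.e. roughly the length of a shortest path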
cluster_size_penalty = soa_cells * cluster_size_weight * abs(manhattan_distance_start_end - len(start_cluster))
return loss_start + cluster_size_penalty + gap_loss
def estimate_manhattan_distance(self, start_x, start_y, end_x, end_y):
return abs(end_x - start_x) + abs(end_y - start_y)
def get_data():
f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\single_example_data.txt", "r")
# Using readlines()
Lines = f.readlines()
weightmatrix = []
points = []
target_data_return = []
count = 0
# one training example per line
for line in Lines:
elements = line.split(";")
#get the weightmatrix
x = elements[0].split(", ")
x.remove("")
for i in range(0, len(x), 1):
x[i] = int(x[i])
weightmatrix.append(x)
#get the start and endpoints
y = elements[1].split(" ")
for i in range(0, len(y), 1):
y[i] = y[i].strip('[]')
y[i] = y[i].split(",")
for j in range(0, len(y[i]), 1):
y[i][j] = int(y[i][j])
points.append(torch.tensor(y))
#get the path and transform it into a matrix
z = elements[2].split(", ")
z.remove("")
for i in range(0, len(z), 1):
z[i] = z[i].strip('()')
z[i] = z[i].split(",")
for j in range(0, len(z[i]), 1):
z[i][j] = int(z[i][j])
target_data = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] #torch.zeros([10, 10])
for i in range(0, len(z), 1):
target_data[z[i][0]][z[i][1]] = 1
target_data[y[1][0]][y[1][1]] = 1
target_data_return.append(target_data)
count += 1
f.close()
return weightmatrix, points, target_data_return, count
weightmatrix, points, target_data, batch_size = get_data()
weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
weightmatrix = weightmatrix.view(batch_size, 1,10,10)
# points = torch.tensor(points, dtype=torch.float32)
# points = points.view(batch_size, 1,10,10)
#mark start and end in the weightmatrix
for i in range(0, batch_size):
weightmatrix[i, 0, points[i][0][0], points[i][0][1]] += 1000
weightmatrix[i, 0, points[i][1][0], points[i][1][1]] += 1000
points = [[[3,0],
[7,4]]]
random_data = autograd.Variable(torch.rand(1, 1, 10, 10),requires_grad = True)
model_path = 'D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models'
criterion = CustomLoss()
my_nn = Net()
# uncomment these lines to load and evaluate a saved model
#my_nn.load_state_dict(torch.load("D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models\\own_loss_model_nr_2.pth"))
#my_nn.eval()
#an optimizer can be chosen
# opt = optim.SGD(params=my_nn.parameters(), lr=0.01)
opt = optim.Adam(params=my_nn.parameters(), lr=0.01)
# opt = optim.AdamW(params=my_nn.parameters(), lr=0.01)
# opt = optim.ASGD(params=my_nn.parameters(), lr=0.01)
result = my_nn(random_data.view(batch_size,1, 1,100))
print(result.view(10,10))
for epoch in tqdm(range(15000)):
result = my_nn(random_data.view(batch_size,1,1, 100))
loss = criterion(result_given=result.view(batch_size,1,10,10), points_given=points)
my_nn.zero_grad()
loss.backward()
#Uncomment the following lines to inspect the gradients of the neural net. The gradients of the last layer (fc4) can be used as an approximation for how the output of the NN will change.
# if(sum(my_nn.fc1.bias.grad) != 0):
# print(my_nn.fc1.bias.grad)
# if(sum(my_nn.fc2.bias.grad) != 0):
# print(my_nn.fc2.bias.grad)
# if(sum(my_nn.fc3.bias.grad) != 0):
# print(my_nn.fc3.bias.grad)
# if(sum(my_nn.fc4.bias.grad) != 0):
# pass
#print(my_nn.fc4.weight.grad)
#weights = my_nn.fc4.bias.grad *1000
#else:
# print(len(weights))
# weights_img = weights.view(250,100)
# weights_img = weights.view(10,10)
# weights_imgplot = plt.matshow(weights_img.detach().numpy())
# plt.show()
opt.step()
#Uncomment these lines to save the model after every training iteration
# torch.save(my_nn.state_dict(),
# model_path + "\\own_loss_model_nr_" + "2" + ".pth")
print("loss" , loss)
print(result.view(10,10))
#Plot the result of the NN
img = result.round().view(10,10)
imgplot = plt.matshow(img.detach().numpy())
plt.show()
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(100, 250)
self.fc2 = nn.Linear(250, 512)
self.fc3 = nn.Linear(512, 250)
self.fc4 = nn.Linear(250, 100)
# x represents our data
def forward(self, x):
# Pass data through fc1
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
x = self.fc4(x)
x = torch.sigmoid(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
class CustomLoss(nn.Module):
def __init__(self):
super(CustomLoss,self).__init__()
def forward(self, result_given, points_given, weightmatrix_given):
#variables for easier variation of the loss function
weight = 20000
gap_weight = 3000 #1200
cluster_size_weight = 3
weight_weight = 1.1 #0.5
result_size = result_given.size()
loss = torch.tensor([0], dtype=torch.float32, requires_grad = True)
for i in range(0, result_size[0]):
result = result_given[i, 0, 0:, 0:]
weightmatrix = weightmatrix_given[i, 0, 0:, 0:]
points = points_given[i]
manhattan_distance_start_end = self.estimate_manhattan_distance(points[0][0], points[0][1], points[1][0], points[1][1])
soa_cells = sum(sum(result))
soa_cells_inv = 100 - sum(sum(result))
#set the start and endpoint to 1
loss_start = torch.tensor([0], dtype=torch.float32, requires_grad = True)
if(result[points[0][0]][points[0][1]].round() == 0 or result[points[1][0]][points[1][1]].round() == 0):
loss_start = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
loss_start = loss_start.view(1)
#first compute the clusters
img = result.round()
img = img.detach().numpy()
img = np.array(img, dtype=np.uint8)
num_labels, labels_im = cv2.connectedComponents(img)
start_value = labels_im[points[0][0]][points[0][1]]
end_value = labels_im[points[1][0]][points[1][1]]
#remodel the matrix, so that distance_transform can be used
start_cluster = []
gap_loss = torch.tensor([0], dtype=torch.float32)
if(start_value != 0 and end_value != 0):
column_nr = len(labels_im)
row_nr = len(labels_im[0])
for i in range(0, column_nr):
for j in range(0, row_nr):
if (labels_im[i][j] == end_value):
labels_im[i][j] = 0
elif (labels_im[i][j] == 0):
labels_im[i][j] = end_value
elif (labels_im[i][j] == start_value):
start_cluster.append([i,j])
if(end_value == start_value):
if(labels_im[i][j] == 0):
start_cluster.append([i,j])
# perform the distance transform
labels_im = np.array(labels_im, dtype=np.uint8)
dist_img = cv2.distanceTransform(labels_im, distanceType=cv2.DIST_L1, maskSize=3).astype(np.float32)
#print(dist_img[points[0][0]][points[0][1]])
#get the min distance to the end_cluster from the start_cluster
min_distance = dist_img[points[0][0]][points[0][1]]
for cell in start_cluster:
if dist_img[cell[0]][cell[1]] < min_distance:
min_distance = dist_img[cell[0]][cell[1]]
gap_loss = min_distance * soa_cells_inv * gap_weight
else:
gap_loss = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
gap_loss = gap_loss.view(1)
cluster_size_penalty = torch.tensor([0], dtype=torch.float32, requires_grad = True)
cluster_size_penalty = sum((result*weightmatrix).view(100)) * weight_weight * abs(manhattan_distance_start_end - len(start_cluster))
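#summing result*weightmatrix charges every active cell its terrain weight, so routes through cheap cells of the weightmatrix are preferred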
#Concatenate the loss with the other losses from the batch
loss = torch.cat((loss, loss_start + gap_loss + cluster_size_penalty), 0)
#return the mean of all losses over the batch
return sum(loss)/result_size[0]
def estimate_manhattan_distance(self, start_x, start_y, end_x, end_y):
return abs(end_x - start_x) + abs(end_y - start_y)
def get_data():
f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\200_training_data.txt", "r")
# Using readlines()
Lines = f.readlines()
weightmatrix = []
points = []
target_data_return = []
count = 0
# one training example per line
for line in Lines:
elements = line.split(";")
#get the weightmatrix
x = elements[0].split(", ")
x.remove("")
for i in range(0, len(x), 1):
x[i] = int(x[i])
weightmatrix.append(x)
#get the start and endpoints
y = elements[1].split(" ")
for i in range(0, len(y), 1):
y[i] = y[i].strip('[]')
y[i] = y[i].split(",")
for j in range(0, len(y[i]), 1):
y[i][j] = int(y[i][j])
points.append(torch.tensor(y))
#get the path and transform it into a matrix
z = elements[2].split(", ")
z.remove("")
for i in range(0, len(z), 1):
z[i] = z[i].strip('()')
z[i] = z[i].split(",")
for j in range(0, len(z[i]), 1):
z[i][j] = int(z[i][j])
target_data = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] #torch.zeros([10, 10])
for i in range(0, len(z), 1):
target_data[z[i][0]][z[i][1]] = 1
target_data[y[1][0]][y[1][1]] = 1
target_data_return.append(target_data)
count += 1
f.close()
return weightmatrix, points, target_data_return, count
weightmatrix, points, target_data, batch_size = get_data()
weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
weightmatrix = weightmatrix.view(batch_size, 1,10,10)
random_data = autograd.Variable(torch.rand(1, 1, 10, 10),requires_grad = True)
model_path = 'D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models'
criterion = CustomLoss()
my_nn = Net()
# uncomment these lines to load and evaluate a saved model
#my_nn.load_state_dict(torch.load("D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models\\own_loss_model_nr_2.pth"))
#my_nn.eval()
#an optimizer can be chosen
# opt = optim.SGD(params=my_nn.parameters(), lr=0.01)
opt = optim.Adam(params=my_nn.parameters(), lr=0.01)
# opt = optim.AdamW(params=my_nn.parameters(), lr=0.01)
# opt = optim.ASGD(params=my_nn.parameters(), lr=0.01)
result = my_nn(weightmatrix.view(batch_size,1, 1,100))
for epoch in tqdm(range(15000)):
result = my_nn(weightmatrix.view(batch_size,1,1, 100))
loss = criterion(result_given=result.view(batch_size,1,10,10), points_given=points, weightmatrix_given=weightmatrix)
my_nn.zero_grad()
loss.backward()
#Uncomment the following lines to inspect the gradients of the neural net. The gradients of the last layer (fc4) can be used as an approximation for how the output of the NN will change.
# if(sum(my_nn.fc1.bias.grad) != 0):
# print(my_nn.fc1.bias.grad)
# if(sum(my_nn.fc2.bias.grad) != 0):
# print(my_nn.fc2.bias.grad)
# if(sum(my_nn.fc3.bias.grad) != 0):
# print(my_nn.fc3.bias.grad)
# if(sum(my_nn.fc4.bias.grad) != 0):
# #pass
# print(my_nn.fc4.weight.grad)
# weights = my_nn.fc4.bias.grad *1000
# #else:
# #print(len(weights))
# #weights_img = weights.view(250,100)
# weights_img = weights.view(10,10)
# weights_imgplot = plt.matshow(weights_img.detach().numpy())
# plt.show()
opt.step()
#save the model after every iteration
torch.save(my_nn.state_dict(),
model_path + "\\own_loss_model_nr_" + "4" + ".pth")
# print("loss" , loss)
# print(result.view(10,10))
# #print(torch.round(result).view(10,10))
# img = result.round().view(10,10)
# imgplot = plt.matshow(img.detach().numpy())
# plt.show()
#This script tracks the stats of the final neural net with an adapted loss function
import torch
from torch import autograd, nn, optim
import torch.nn.functional as F
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(100, 250)
self.fc2 = nn.Linear(250, 512)
self.fc3 = nn.Linear(512, 250)
self.fc4 = nn.Linear(250, 100)
# x represents our data
def forward(self, x):
# Pass data through fc1
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
x = self.fc4(x)
x = torch.sigmoid(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
class CustomLoss(nn.Module):
def __init__(self):
super(CustomLoss,self).__init__()
def forward(self, result_given, points_given, weightmatrix, weight_weight):
#variables for easier variation of the loss function
weight = 20000
gap_weight = 5000 #3000 #1200
cluster_size_weight = 3
result_size = result_given.size()
for i in range(0, result_size[0]):
result = result_given[i, 0, 0:, 0:]
points = points_given[i]
manhattan_distance_start_end = self.estimate_manhattan_distance(points[0][0], points[0][1], points[1][0], points[1][1])
min_distance = manhattan_distance_start_end
soa_cells = sum(sum(result))
soa_cells_inv = 100 - sum(sum(result))
#set the start and endpoint to 1
loss_start = torch.tensor([0], dtype=torch.float32, requires_grad = True)
if(result[points[0][0]][points[0][1]].round() == 0 or result[points[1][0]][points[1][1]].round() == 0):
loss_start = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
loss_start = loss_start.view(1)
#first compute the clusters
img = result.round()
img = img.detach().numpy()
img = np.array(img, dtype=np.uint8)
num_labels, labels_im = cv2.connectedComponents(img)
start_value = labels_im[points[0][0]][points[0][1]]
end_value = labels_im[points[1][0]][points[1][1]]
#remodel the matrix, so that distance_transform can be used
start_cluster = []
gap_loss = torch.tensor([0], dtype=torch.float32)
if(start_value != 0 and end_value != 0):
column_nr = len(labels_im)
row_nr = len(labels_im[0])
for i in range(0, column_nr):
for j in range(0, row_nr):
if (labels_im[i][j] == end_value):
labels_im[i][j] = 0
elif (labels_im[i][j] == 0):
labels_im[i][j] = end_value
elif (labels_im[i][j] == start_value):
start_cluster.append([i,j])
if(end_value == start_value):
if(labels_im[i][j] == 0):
start_cluster.append([i,j])
# perform the distance transform
labels_im = np.array(labels_im, dtype=np.uint8)
dist_img = cv2.distanceTransform(labels_im, distanceType=cv2.DIST_L1, maskSize=3).astype(np.float32)
#get the min distance to the end_cluster from the start_cluster
min_distance = dist_img[points[0][0]][points[0][1]]
for cell in start_cluster:
if dist_img[cell[0]][cell[1]] < min_distance:
min_distance = dist_img[cell[0]][cell[1]]
#print(min_distance)
gap_loss = min_distance * soa_cells_inv * gap_weight
else:
gap_loss = (2-(result[points[0][0]][points[0][1]] + result[points[1][0]][points[1][1]])) * weight
gap_loss = gap_loss.view(1)
cluster_size_penalty = torch.tensor([0], dtype=torch.float32, requires_grad = True)
cluster_size_penalty = sum((result*weightmatrix).view(100)) * weight_weight * abs(manhattan_distance_start_end - len(start_cluster))
return loss_start + cluster_size_penalty + gap_loss , min_distance
def estimate_manhattan_distance(self, start_x, start_y, end_x, end_y):
return abs(end_x - start_x) + abs(end_y - start_y)
def get_data():
f = open("D:\\Studium\\Bachelorarbeit\\Unity Projekte\\A-Stern Test\\A-Stern Test\\Assets\\Resources\\single_example_data2.txt", "r")
# Using readlines()
Lines = f.readlines()
weightmatrix = []
points = []
target_data_return = []
count = 0
# one training example per line
for line in Lines:
elements = line.split(";")
#get the weightmatrix
x = elements[0].split(", ")
x.remove("")
for i in range(0, len(x), 1):
x[i] = int(x[i])
weightmatrix.append(x)
#get the start and endpoints
y = elements[1].split(" ")
for i in range(0, len(y), 1):
y[i] = y[i].strip('[]')
y[i] = y[i].split(",")
for j in range(0, len(y[i]), 1):
y[i][j] = int(y[i][j])
points.append(torch.tensor(y))
#get the path and transform it into a matrix
z = elements[2].split(", ")
z.remove("")
for i in range(0, len(z), 1):
z[i] = z[i].strip('()')
z[i] = z[i].split(",")
for j in range(0, len(z[i]), 1):
z[i][j] = int(z[i][j])
target_data = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] #torch.zeros([10, 10])
for i in range(0, len(z), 1):
target_data[z[i][0]][z[i][1]] = 1
target_data[y[1][0]][y[1][1]] = 1
target_data_return.append(target_data)
count += 1
f.close()
return weightmatrix, points, target_data_return, count
weightmatrix, points, target_data, batch_size = get_data()
weightmatrix = torch.tensor(weightmatrix, dtype=torch.float32)
weightmatrix = weightmatrix.view(batch_size, 1,10,10)
random_data = autograd.Variable(torch.rand(1, 1, 10, 10),requires_grad = True)
model_path = 'D:\\Studium\\Bachelorarbeit\\Machine Learning\\ressources\\models'
criterion = CustomLoss()
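#sweep the cluster weight: i runs from 0 to 99, so weight_weight = i/10 covers 0.0 to 9.9 in steps of 0.1, with a fresh net and optimizer per setting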
for i in range(0, 100, 1):
my_nn = Net()
#an optimizer can be chosen
# opt = optim.SGD(params=my_nn.parameters(), lr=0.01)
opt = optim.Adam(params=my_nn.parameters(), lr=0.01)
# opt = optim.AdamW(params=my_nn.parameters(), lr=0.01)
# opt = optim.ASGD(params=my_nn.parameters(), lr=0.01)
result = my_nn(random_data.view(batch_size,1, 1,100))
for epoch in tqdm(range(15000)):
result = my_nn(random_data.view(batch_size,1,1, 100))
loss, min_distance = criterion(result_given=result.view(batch_size,1,10,10), points_given=points, weightmatrix=weightmatrix, weight_weight=(i/10))
my_nn.zero_grad()
loss.backward()
opt.step()
print("loss" , loss)
print(result.view(10,10))
f = open("complete_statistics_changing_clusterweight_gapweight5000.txt", "a")
to_write = "clusterweight: " + str(i/10) + ", min_distance: " + str(min_distance) + ", result: " + str(result.view(100)) + ", result_rounded: " + str(result.round().view(100)) + ", number_of_pixels: " + str(sum(result.round().view(100))) + "\n"
f.write(to_write)
f.close()
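Recorded sweep results (apparently the final min_distance per clusterweight / gapweight setting; cf. the logging loop above):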
clusterweight 0: 0.0
clusterweight 1: 0.0
clusterweight 2: 0.0
clusterweight 3: 0.0
clusterweight 4: 0.0
clusterweight 5: 0.0
clusterweight 6: 0.0
clusterweight 7: 0.0
clusterweight 8: 0.0
clusterweight 9: 0.0
clusterweight 10: 2.0
clusterweight 11: 0.0
clusterweight 12: 0.0
clusterweight 13: 2.0
clusterweight 14: 8.0
clusterweight 15: 0.0
clusterweight 16: 0.0
clusterweight 17: 3.0
clusterweight 18: 0.0
clusterweight 19: 0.0
clusterweight 20: 0.0
clusterweight 21: 2.0
clusterweight 22: 0.0
clusterweight 23: 0.0
clusterweight 24: 0.0
clusterweight 25: 0.0
clusterweight 26: 3.0
clusterweight 27: 2.0
clusterweight 28: 0.0
clusterweight 29: 3.0
clusterweight 30: 2.0
clusterweight 31: 0.0
clusterweight 32: 0.0
clusterweight 33: 3.0
clusterweight 34: 0.0
clusterweight 35: 2.0
clusterweight 36: 0.0
clusterweight 37: 0.0
clusterweight 38: 2.0
clusterweight 39: 0.0
clusterweight 40: 8.0
clusterweight 41: 3.0
clusterweight 42: 0.0
clusterweight 43: 8.0
clusterweight 44: 0.0
clusterweight 45: 3.0
clusterweight 46: 8.0
clusterweight 47: 0.0
clusterweight 48: 0.0
clusterweight 49: 0.0
clusterweight 50: 8.0
clusterweight 51: 2.0
clusterweight 52: 2.0
clusterweight 53: 8.0
clusterweight 54: 2.0
clusterweight 55: 2.0
clusterweight 56: 3.0
clusterweight 57: 2.0
clusterweight 58: 8.0
clusterweight 59: 0.0
clusterweight 60: 2.0
clusterweight 61: 2.0
clusterweight 62: 2.0
clusterweight 63: 2.0
clusterweight 64: 2.0
clusterweight 65: 8.0
clusterweight 66: 2.0
clusterweight 67: 8.0
clusterweight 68: 2.0
clusterweight 69: 2.0
clusterweight 70: 0.0
clusterweight 71: 0.0
clusterweight 72: 2.0
clusterweight 73: 3.0
clusterweight 74: 2.0
clusterweight 75: 0.0
clusterweight 76: 0.0
clusterweight 77: 2.0
clusterweight 78: 0.0
clusterweight 79: 2.0
clusterweight 80: 2.0
clusterweight 81: 0.0
clusterweight 82: 0.0
clusterweight 83: 2.0
clusterweight 84: 3.0
clusterweight 85: 2.0
clusterweight 86: 2.0
clusterweight 87: 2.0
clusterweight 88: 2.0
clusterweight 89: 2.0
clusterweight 90: 8.0
clusterweight 91: 2.0
clusterweight 92: 8.0
clusterweight 93: 2.0
clusterweight 94: 8.0
clusterweight 95: 2.0
clusterweight 96: 2.0
clusterweight 97: 2.0
clusterweight 98: 2.0
clusterweight 99: 8.0
gapweight 0: 8.0
gapweight 100: 4.0
gapweight 200: 3.0
gapweight 300: 2.0
gapweight 400: 2.0
gapweight 500: 8.0
gapweight 600: 0.0
gapweight 700: 2.0
gapweight 800: 2.0
gapweight 900: 2.0
gapweight 1000: 0.0
gapweight 1100: 0.0
gapweight 1200: 0.0
gapweight 1300: 2.0
gapweight 1400: 2.0
gapweight 1500: 0.0
gapweight 1600: 0.0
gapweight 1700: 0.0
gapweight 1800: 0.0
gapweight 1900: 0.0
gapweight 2000: 2.0
gapweight 2100: 0.0
gapweight 2200: 0.0
gapweight 2300: 0.0
gapweight 2400: 0.0
gapweight 2500: 0.0
gapweight 2600: 0.0
gapweight 2700: 3.0
gapweight 2800: 8.0
gapweight 2900: 0.0
gapweight 3000: 0.0
gapweight 3100: 0.0
gapweight 3200: 3.0
gapweight 3300: 0.0
gapweight 3400: 0.0
gapweight 3500: 0.0
gapweight 3600: 0.0
gapweight 3700: 0.0
gapweight 3800: 3.0
gapweight 3900: 0.0
gapweight 4000: 0.0
gapweight 4100: 0.0
gapweight 4200: 0.0
gapweight 4300: 0.0
gapweight 4400: 0.0
gapweight 4500: 0.0
gapweight 4600: 0.0
gapweight 4700: 0.0
gapweight 4800: 0.0
gapweight 4900: 2.0