Commit 083bb0a5 authored by Mirza Cutuk

GROUPWORK_train+test functions refactoring

parent 603051ba
import torch
from torch import nn

from net import *

## Train + test functions (we'll see whether we keep one function for all training and one for all testing)
# def train_model(model, input, output, criterion, mini_batch_size, nb_epochs):
## Train Model, works for both Basic + Weight Sharing networks
def train_model(model, train_input, train_target, lr, criterion, mini_batch_size, nb_epochs = 100):
    loss_list = []
    acc_list = []
    # Build the optimizer once, outside the loop, so Adam's running
    # moment estimates are not reset at every mini-batch
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0)
    for e in range(nb_epochs):
        acc_loss = 0
        nb_errors = 0
        for b in range(0, train_input.size(0), mini_batch_size):
            output = model(train_input.narrow(0, b, mini_batch_size))
            y = train_target.narrow(0, b, mini_batch_size)
            loss = criterion(output.view(-1), y.float())
            acc_loss = acc_loss + loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Threshold the sigmoid output at 0.5 to get binary predictions
            output_b = (output.view(-1) > 0.5).float()
            for k in range(mini_batch_size):
                if y[k] != output_b[k]:
                    nb_errors = nb_errors + 1
        # print(e, acc_loss)
        loss_list.append(acc_loss)
        accuracy_e = 1 - nb_errors/train_input.size(0)
        # print(f'{e}, acc: {accuracy_e}')
        acc_list.append(accuracy_e)
    return loss_list, acc_list
## Test Model, works for both Basic + Weight Sharing networks
def test_model(model, test_input, test_target, criterion, mini_batch_size):
    nb_errors = 0
    for b in range(0, test_input.size(0), mini_batch_size):
        output = model(test_input.narrow(0, b, mini_batch_size))
        # Same 0.5 threshold as in training
        output_b = (output.view(-1) > 0.5).float()
        y = test_target.narrow(0, b, mini_batch_size)
        for k in range(mini_batch_size):
            if y[k] != output_b[k]:
                nb_errors = nb_errors + 1
    accuracy = 1 - nb_errors/test_input.size(0)
    return accuracy
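## Minimal usage sketch for train_model/test_model above, assuming net.py
## provides a binary model `Net` whose forward pass ends in a sigmoid
## (the actual class name lives in net.py), with random placeholder
## tensors standing in for the MNIST digit-pair data.
def demo_basic():
    model = Net()                                    # assumed model class from net.py
    train_input = torch.randn(1000, 2, 14, 14)       # placeholder image pairs
    train_target = (torch.rand(1000) > 0.5).long()   # placeholder 0/1 comparison labels
    loss_list, acc_list = train_model(model, train_input, train_target,
                                      lr=1e-3, criterion=nn.BCELoss(),
                                      mini_batch_size=100, nb_epochs=25)
    accuracy = test_model(model, train_input, train_target, nn.BCELoss(),
                          mini_batch_size=100)
    print(f'final train acc: {acc_list[-1]}, test acc: {accuracy}')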
# def compute_nb_errors_test(model, input, target, mini_batch_size, nb_epochs):
# loss_list = []
# criterion = nn.BCELoss()
# nb_errors = 0
# for e in range(nb_epochs):
# acc_loss = 0
# for b in range(0, input.size(0), mini_batch_size):
# output = model(input.narrow(0, b, mini_batch_size))
# output_b = (output.view(-1)>0.5).float()
# y = target.narrow(0, b, mini_batch_size)
# loss = criterion(output.view(-1), y.float())
# acc_loss = acc_loss + loss.item()
# if(e==(nb_epochs-1)):
# for k in range(mini_batch_size):
# if target[b+k] != output_b[k]:
# nb_errors = nb_errors + 1
# loss_list.append(acc_loss)
# return nb_errors, loss_list
# ## Classification
# def train_model_class(model, train_input, train_target, mini_batch_size, nb_epochs = 100):
#     ...
#     return nb_errors
## Model 3: WS + AL
## Train Model WSAL, works for Weight Sharing with Auxiliary Loss
def train_model_WSAL(model, train_input, train_target, train_classes, lr, criterion_class, criterion_comp, mini_batch_size, nb_epochs):
    loss_list = []
    acc_list = []
    # As above, build the optimizer once so its state persists across batches
    optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=1e-5)
    for e in range(nb_epochs):
        acc_loss = 0
        nb_errors = 0
        for b in range(0, train_input.size(0), mini_batch_size):
            out, res = model(train_input.narrow(0, b, mini_batch_size))
            # Auxiliary targets: the digit class of each of the two input images
            y1 = train_classes[:, 0].narrow(0, b, mini_batch_size)
            y2 = train_classes[:, 1].narrow(0, b, mini_batch_size)
            loss1 = criterion_class(out[:, 0:10], y1)
            loss2 = criterion_class(out[:, 10:20], y2)
            # Main target: the binary comparison of the two digits
            y = train_target.narrow(0, b, mini_batch_size)
            loss3 = criterion_comp(res.view(-1), y.float())
            # Weighted sum: the two auxiliary losses count half each, the main loss fully
            loss = 0.5*loss1 + 0.5*loss2 + 1.0*loss3
            acc_loss = acc_loss + loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Count training errors on the comparison output
            output_b = (res.view(-1) > 0.5).float()
            for k in range(mini_batch_size):
                if y[k] != output_b[k]:
                    nb_errors = nb_errors + 1
        print(e, acc_loss)
        loss_list.append(acc_loss)
        accuracy_e = 1 - nb_errors/train_input.size(0)
        print(f'{e}, acc: {accuracy_e}')
        acc_list.append(accuracy_e)
    return loss_list, acc_list
## Test Model WSAL, works for Weight Sharing with Auxiliary Loss
def test_model_WSAL(model, input, target, mini_batch_size):
    nb_errors = 0
    for b in range(0, input.size(0), mini_batch_size):
        out, res = model(input.narrow(0, b, mini_batch_size))
        output_b = (res.view(-1) > 0.5).float()
        for k in range(mini_batch_size):
            if target[b+k] != output_b[k]:
                nb_errors = nb_errors + 1
    return nb_errors
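## Analogous usage sketch for the auxiliary-loss pipeline, assuming net.py
## provides a model `NetWSAL` (assumed name) whose forward pass returns
## (out, res): an [N, 20] tensor of digit scores and an [N] comparison
## probability from a sigmoid.
def demo_WSAL():
    model = NetWSAL()                                  # assumed model class from net.py
    train_input = torch.randn(1000, 2, 14, 14)         # placeholder image pairs
    train_classes = torch.randint(0, 10, (1000, 2))    # placeholder digit classes
    train_target = (train_classes[:, 0] <= train_classes[:, 1]).long()
    loss_list, acc_list = train_model_WSAL(model, train_input, train_target,
                                           train_classes, 1e-3,
                                           nn.CrossEntropyLoss(), nn.BCELoss(),
                                           mini_batch_size=100, nb_epochs=25)
    nb_errors = test_model_WSAL(model, train_input, train_target,
                                mini_batch_size=100)
    print(f'test errors: {nb_errors} / {train_input.size(0)}')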