Commit 1e34c19f authored by Mira Arabi Haddad

GROUPWORK_Finalize test.py

parent aafe3640
@@ -4,66 +4,83 @@ from net import *
from train import *
from helper import *
from torch import nn
from statistics import *
# ## main file to be run
# dictionary to save training statistics (u: mean accuracy in %, std: standard deviation over runs)
results_train = {
"NN": {"u":0,"std":0},
"CNN": {"u":0,"std":0},
"WS_NN": {"u":0,"std":0},
"WS_CNN": {"u":0,"std":0},
"WSAL_NN": {"u":0,"std":0},
"WSAL_CNN": {"u":0,"std":0},
}
# ## Basic Model
# model1 = CNN_VGG(200)
# model = CNN_VGG(200)
# dictionary to save testing statistics
results_test = {
"NN": {"u":0,"std":0},
"CNN": {"u":0,"std":0},
"WS_NN": {"u":0,"std":0},
"WS_CNN": {"u":0,"std":0},
"WSAL_NN": {"u":0,"std":0},
"WSAL_CNN": {"u":0,"std":0},
}
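# shared hyper-parameters: mini-batch size and learning rate used for all models below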
mini_batch_size=100
lr = 1e-4
lr = 1e-3
# lists for plotting
acc_train_NN_list, acc_test_NN_list, acc_train_CNN_list, acc_test_CNN_list = [], [], [], []
acc_train_WS_NN_list, acc_test_WS_NN_list, acc_train_WS_CNN_list, acc_test_WS_CNN_list = [], [], [], []
acc_train_WSAL_NN_list, acc_test_WSAL_NN_list, acc_train_WSAL_CNN_list, acc_test_WSAL_CNN_list = [], [], [], []
for _ in range(1):
# 10 runs with a different dataset each time
for _ in range(10):
# basic models
model_basic_NN = NN(200)
model_basic_CNN = CNN_VGG(200)
# models with weight sharing
model_WS_NN = NN_WS(200)
model_WS_CNN = CNN_WS(200)
model_WS_NN = NN_WS(300)
model_WS_CNN = CNN_WS(300)
# models with weight sharing and auxiliary loss
model_WSAL_NN = NN_WS_AL(200)
model_WSAL_CNN = CNN_WS_AL(200)
model_WSAL_NN = NN_WS_AL(300)
model_WSAL_CNN = CNN_WS_AL(300)
train_input, train_target, train_classes, test_input, test_target, test_classes = prologue.generate_pair_sets(1000)
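    # fresh data for this run: 1000 training and 1000 test pairs with their comparison targets and digit classes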
# # train + test basic models (NN+CNN)
# #NN
# loss_train_NN, acc_train_NN = train_model(model_basic_NN, train_input, train_target, lr, nn.BCELoss(), mini_batch_size, nb_epochs=25)
# acc_test_NN = test_model(model_basic_NN, test_input, test_target, mini_batch_size)
# acc_train_NN_list.append(acc_train_NN[-1])
# acc_test_NN_list.append(acc_test_NN)
# print(f' NN: Basic Network train_acc = {acc_train_NN[-1]}')
# print(f' NN: Basic Network test_acc = {acc_test_NN}')
# #CNN
# loss_train_CNN, acc_train_CNN = train_model(model_basic_CNN, train_input, train_target, lr, nn.BCELoss(), mini_batch_size, nb_epochs=25)
# acc_test_CNN = test_model(model_basic_CNN, test_input, test_target, mini_batch_size)
# acc_train_CNN_list.append(acc_train_CNN[-1])
# acc_test_CNN_list.append(acc_test_CNN)
# print(f' CNN: Basic Network train_acc = {acc_train_CNN[-1]}')
# print(f' CNN: Basic Network test_acc = {acc_test_CNN}')
# # train + test Weight Sharing models (NN+CNN)
# #NN
# loss_train_WS_NN, acc_train_WS_NN = train_model(model_WS_NN, train_input, train_target, lr, nn.BCELoss(), mini_batch_size, nb_epochs=25)
# acc_test_WS_NN = test_model(model_WS_NN, test_input, test_target, mini_batch_size)
# acc_train_WS_NN_list.append(acc_train_WS_NN[-1])
# acc_test_WS_NN_list.append(acc_test_WS_NN)
# print(f' NN: Weight Sharing Network train_acc = {acc_train_WS_NN[-1]}')
# print(f' NN: Weight Sharing Network test_acc = {acc_test_WS_NN}')
# #CNN
# loss_train_WS_CNN, acc_train_WS_CNN = train_model(model_WS_CNN, train_input, train_target, lr, nn.BCELoss(), mini_batch_size, nb_epochs=25)
# acc_test_WS_CNN = test_model(model_WS_CNN, test_input, test_target, mini_batch_size)
# acc_train_WS_CNN_list.append(acc_train_WS_CNN[-1])
# acc_test_WS_CNN_list.append(acc_test_WS_CNN)
# print(f' CNN: Weight Sharing Network train_acc = {acc_train_WS_CNN[-1]}')
# print(f' CNN: Weight Sharing Network test_acc = {acc_test_WS_CNN}')
# train + test basic models (NN+CNN)
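    # train_model returns the per-epoch training losses and accuracies; test_model returns a single test accuracy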
#NN
loss_train_NN, acc_train_NN = train_model(model_basic_NN, train_input, train_target, lr, nn.BCELoss(), mini_batch_size, nb_epochs=25)
acc_test_NN = test_model(model_basic_NN, test_input, test_target, mini_batch_size)
acc_train_NN_list.append(acc_train_NN[-1])
acc_test_NN_list.append(acc_test_NN)
print(f' NN: Basic Network train_acc = {acc_train_NN[-1]*100}')
print(f' NN: Basic Network test_acc = {acc_test_NN*100}')
#CNN
loss_train_CNN, acc_train_CNN = train_model(model_basic_CNN, train_input, train_target, lr, nn.BCELoss(), mini_batch_size, nb_epochs=25)
acc_test_CNN = test_model(model_basic_CNN, test_input, test_target, mini_batch_size)
acc_train_CNN_list.append(acc_train_CNN[-1])
acc_test_CNN_list.append(acc_test_CNN)
print(f' CNN: Basic Network train_acc = {acc_train_CNN[-1]*100}')
print(f' CNN: Basic Network test_acc = {acc_test_CNN*100}')
# train + test Weight Sharing models (NN+CNN)
#NN
loss_train_WS_NN, acc_train_WS_NN = train_model(model_WS_NN, train_input, train_target, lr, nn.BCELoss(), mini_batch_size, nb_epochs=35)
acc_test_WS_NN = test_model(model_WS_NN, test_input, test_target, mini_batch_size)
acc_train_WS_NN_list.append(acc_train_WS_NN[-1])
acc_test_WS_NN_list.append(acc_test_WS_NN)
print(f' NN: Weight Sharing Network train_acc = {acc_train_WS_NN[-1]*100}')
print(f' NN: Weight Sharing Network test_acc = {acc_test_WS_NN*100}')
#CNN
loss_train_WS_CNN, acc_train_WS_CNN = train_model(model_WS_CNN, train_input, train_target, lr, nn.BCELoss(), mini_batch_size, nb_epochs=35)
acc_test_WS_CNN = test_model(model_WS_CNN, test_input, test_target, mini_batch_size)
acc_train_WS_CNN_list.append(acc_train_WS_CNN[-1])
acc_test_WS_CNN_list.append(acc_test_WS_CNN)
print(f' CNN: Weight Sharing Network train_acc = {acc_train_WS_CNN[-1]*100}')
print(f' CNN: Weight Sharing Network test_acc = {acc_test_WS_CNN*100}')
# train + test Weight Sharing+Auxiliary Loss models (NN+CNN)
#NN
@@ -71,47 +88,65 @@ for _ in range(1):
acc_test_WSAL_NN = test_model_WSAL(model_WSAL_NN, test_input, test_target, mini_batch_size)
acc_train_WSAL_NN_list.append(acc_train_WSAL_NN[-1])
acc_test_WSAL_NN_list.append(acc_test_WSAL_NN)
print(f' NN: Weight Sharing + Auxiliary Loss Network train_acc = {acc_train_WSAL_NN[-1]}')
print(f' NN: Weight Sharing + Auxiliary Loss Network test_acc = {acc_test_WSAL_NN}')
print(f' NN: Weight Sharing + Auxiliary Loss Network train_acc = {acc_train_WSAL_NN[-1]*100}')
print(f' NN: Weight Sharing + Auxiliary Loss Network test_acc = {acc_test_WSAL_NN*100}')
#CNN
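    # the auxiliary loss trains the digit classifier with CrossEntropyLoss on train_classes, on top of the main BCELoss comparison objective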
loss_train_WSAL_CNN, acc_train_WSAL_CNN = train_model_WSAL(model_WSAL_CNN, train_input, train_target, train_classes, lr, nn.CrossEntropyLoss(), nn.BCELoss(), mini_batch_size, nb_epochs=40)
acc_test_WSAL_CNN = test_model_WSAL(model_WSAL_CNN, test_input, test_target, mini_batch_size)
acc_train_WSAL_CNN_list.append(acc_train_WSAL_CNN[-1])
acc_test_WSAL_CNN_list.append(acc_test_WSAL_CNN)
print(f' CNN: Weight Sharing + Auxiliary Loss Network train_acc = {acc_train_WSAL_CNN[-1]}')
print(f' CNN: Weight Sharing + Auxiliary Loss Network test_acc = {acc_test_WSAL_CNN}')
# model2 = NN_Classification(200)
# output = train_model_class(model2, train_input, train_classes, mini_batch_size, nb_epochs=100)
# # output1, output2 = train_model_class(model2, train_input, train_classes, mini_batch_size, nb_epochs=100)
# output_class = torch.cat((output1, output2), 1).detach()
# # print(output_class.size())
# # errors = compute_nb_errors_class(model2, train_input, train_classes, mini_batch_size)
# # print((1-errors/20000)*100)
# # print(output1.size(), output2.size())
# # print(torch.cat((output1, output2), 1).size())
# model3 = MLP_Comparer(200)
# train_model_comp(model3, output_class, train_target, mini_batch_size, nb_epochs=100)
# # error_comp = compute_nb_errors_comp(model3, output_class, train_target, mini_batch_size)
# # print(f'accuracy of comparing = {100-(error_comp/10)}')
# for _ in range(5):
# model_WS = CNN_WS(200)
# train_input, train_target, train_classes, test_input, test_target, test_classes = prologue.generate_pair_sets(1000)
# train_model(model_WS, train_input, train_target, mini_batch_size, nb_epochs=25)
# # errors_WS = compute_nb_errors(model_WS, train_input, train_target, mini_batch_size)
# # errors_WS_test = compute_nb_errors(model_WS, test_input, test_target, mini_batch_size)
# # print(f'accuracy of Weight Sharing = {100-(errors_WS/10)}')
# # print(f'accuracy of Weight Sharing, testing = {100-(errors_WS_test/10)}')
# print(f' train_acc = {acc_train[-1]}')
# print(f' test_acc = {acc_test}')
# for _ in range(1):
# model_WSAL = CNN_WS_AL(200)
# train_input, train_target, train_classes, test_input, test_target, test_classes = prologue.generate_pair_sets(1000)
# train_model_WSAL(model_WSAL, train_input, train_target, train_classes, mini_batch_size, nb_epochs=40)
# errors_WSAL = compute_nb_errors_AL(model_WSAL, train_input, train_target, mini_batch_size)
# errors_WSAL_test = compute_nb_errors_AL(model_WSAL, test_input, test_target, mini_batch_size)
# print(f'accuracy of Weight Sharing +AL= {100-(errors_WSAL/10)}')
# print(f'accuracy of Weight Sharing, testing +AL= {100-(errors_WSAL_test/10)}')
\ No newline at end of file
print(f' CNN: Weight Sharing + Auxiliary Loss Network train_acc = {acc_train_WSAL_CNN[-1]*100}')
print(f' CNN: Weight Sharing + Auxiliary Loss Network test_acc = {acc_test_WSAL_CNN*100}')
# save results (mean and standard deviation)
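# mean and stdev (from the statistics module) are computed over the 10 runs and converted to percentages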
results_train["NN"]["u"], results_train["NN"]["std"] = round(100*mean(acc_train_NN_list), 2), round(100*stdev(acc_train_NN_list), 2)
results_test["NN"]["u"], results_test["NN"]["std"] = round(100*mean(acc_test_NN_list), 2), round(100*stdev(acc_test_NN_list), 2)
results_train["CNN"]["u"], results_train["CNN"]["std"] = round(100*mean(acc_train_CNN_list), 2), round(100*stdev(acc_train_CNN_list), 2)
results_test["CNN"]["u"], results_test["CNN"]["std"] = round(100*mean(acc_test_CNN_list), 2), round(100*stdev(acc_test_CNN_list), 2)
results_train["WS_NN"]["u"], results_train["WS_NN"]["std"] = round(100*mean(acc_train_WS_NN_list), 2), round(100*stdev(acc_train_WS_NN_list), 2)
results_test["WS_NN"]["u"], results_test["WS_NN"]["std"] = round(100*mean(acc_test_WS_NN_list), 2), round(100*stdev(acc_test_WS_NN_list), 2)
results_train["WS_CNN"]["u"], results_train["WS_CNN"]["std"] = round(100*mean(acc_train_WS_CNN_list), 2), round(100*stdev(acc_train_WS_CNN_list), 2)
results_test["WS_CNN"]["u"], results_test["WS_CNN"]["std"] = round(100*mean(acc_test_WS_CNN_list), 2), round(100*stdev(acc_test_WS_CNN_list), 2)
results_train["WSAL_NN"]["u"], results_train["WSAL_NN"]["std"] = round(100*mean(acc_train_WSAL_NN_list), 2), round(100*stdev(acc_train_WSAL_NN_list), 2)
results_test["WSAL_NN"]["u"], results_test["WSAL_NN"]["std"] = round(100*mean(acc_test_WSAL_NN_list), 2), round(100*stdev(acc_test_WSAL_NN_list), 2)
results_train["WSAL_CNN"]["u"], results_train["WSAL_CNN"]["std"] = round(100*mean(acc_train_WSAL_CNN_list), 2), round(100*stdev(acc_train_WSAL_CNN_list), 2)
results_test["WSAL_CNN"]["u"], results_test["WSAL_CNN"]["std"] = round(100*mean(acc_test_WSAL_CNN_list), 2), round(100*stdev(acc_test_WSAL_CNN_list), 2)
print(f'Training Results: {results_train}')
print(f'Testing Results: {results_test}')
# for the last run, plot the training losses and training accuracies of the NN and CNN variants of each architecture
# hence, 4 plots
# dictionaries for plotting
d_plot1 = {
'loss_train_NN':loss_train_NN,
'loss_train_WS_NN':loss_train_WS_NN,
'loss_train_WSAL_NN':loss_train_WSAL_NN
}
d_plot2 = {
'acc_train_NN':acc_train_NN,
'acc_train_WS_NN':acc_train_WS_NN,
'acc_train_WSAL_NN':acc_train_WSAL_NN
}
d_plot3 = {
'loss_train_CNN':loss_train_CNN,
'loss_train_WS_CNN':loss_train_WS_CNN,
'loss_train_WSAL_CNN':loss_train_WSAL_CNN
}
d_plot4 = {
'acc_train_CNN':acc_train_CNN,
'acc_train_WS_CNN':acc_train_WS_CNN,
'acc_train_WSAL_CNN':acc_train_WSAL_CNN
}
# Plot loss and accuracy
plot_figures(d_plot1, 'Training Loss NN')
plot_figures(d_plot2, 'Training Accuracy NN', acc=True)
plot_figures(d_plot3, 'Training Loss CNN')
plot_figures(d_plot4, 'Training Accuracy CNN', acc=True)
\ No newline at end of file