Commit cfac83f7 authored by Elif Ceylan

GROUPWORK_clean up and keep the necessary

parent 34a3e662
# External
import torch
import matplotlib.pyplot as plt
import matplotlib.patches as ptc
import math
# Internal
import modules as n
import SGD
# autograd globally off
torch.set_grad_enabled(False)
def generate_disc_set(nb):
    # points drawn uniformly in the unit square
    input = torch.empty(nb, 2).uniform_(0, 1)
    # label 1 inside the disc of radius sqrt(1/(2*pi)) centered at (0.5, 0.5),
    # label 0 outside: sign() gives -1 inside / +1 outside, and the
    # add(-3).div(2) plus the bitwise-not below maps that to 1 / 0
    target = input.add(-0.5).pow(2).sum(1).sub(1 / (2 * math.pi)).sign().add(-3).div(2).long()
    return input, ~target
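# A more readable equivalent of the labeling rule above (a sketch, not part
# of the project's helper module): label 1 inside the disc of radius
# sqrt(1/(2*pi)) centered at (0.5, 0.5), label 0 outside. The disc has area
# pi * r^2 = 1/2, so the two classes are balanced in expectation.
def generate_disc_set_readable(nb):
    input = torch.empty(nb, 2).uniform_(0, 1)
    dist_sq = input.sub(0.5).pow(2).sum(1)         # squared distance to the center
    target = dist_sq.lt(1 / (2 * math.pi)).long()  # 1 inside the disc, 0 outside
    return input, target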
# generate train and test data
train_input, train_target = generate_disc_set(1000)
test_input, test_target = generate_disc_set(1000)
# normalize both splits with the training-set statistics
mean, std = train_input.mean(), train_input.std()
train_input.sub_(mean).div_(std)
test_input.sub_(mean).div_(std)
# network parameters
lr = 5e-3
gamma = 0.9
mini_batch_size = 25
nb_epochs = 500
# (optional) visualize the generated dataset:
# plt.gca().set_aspect('equal', adjustable='box')
# plt.show()
# Dictionary of candidate architectures: module lists consumed by Network
networks = {
    1: [
        n.Linear(2, 25),
        n.ReLU(),
        n.Linear(25, 25),
        # n.ReLU(),
        # n.Linear(25, 25),
        n.ReLU(),
        n.Linear(25, 1),
        n.Sigmoid()
    ],
    2: [
        n.Linear(2, 25),
        n.leakyReLU(),
        n.Linear(25, 25),
        n.leakyReLU(),
        n.Linear(25, 25),
        n.leakyReLU(),
        n.Linear(25, 1),
        n.Sigmoid()
    ],
    3: [
        n.Linear(2, 25),
        n.leakyReLU(),
        n.Linear(25, 25),
        n.leakyReLU(),
        n.Linear(25, 25),
        n.leakyReLU(),
        n.Linear(25, 1),
        n.Tanh()
    ],
}
# reference architecture kept for quick experiments (not used below)
test = [
    n.Linear(2, 25),
    n.ReLU(),
    n.Linear(25, 25),
    n.ReLU(),
    n.Linear(25, 25),
    n.ReLU(),
    n.Linear(25, 1),
    n.Sigmoid()
]
class Network():
    def __init__(self, mods, input_size, output_size, hidden) -> None:
        # input_size, output_size and hidden are kept for the call-site API;
        # only the module list is used
        self.mods = mods
        # build the sequential container once and reuse it for every pass
        self.seq = n.Sequential(self.mods)

    # forward pass through all modules
    def forward(self, train_input):
        return self.seq.forward(train_input)

    # backward pass, propagating the gradient of the loss
    def backward(self, g_loss):
        return self.seq.backward(g_loss)

    # get params
    def param(self):
        return self.seq.param()

    # update params with a plain gradient step of size lr
    def update_params(self, lr):
        self.seq.update_params(lr)

    # reset accumulated gradients
    def zero_grad(self):
        self.seq.zero_grad()
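# SGD.SGD_Optim below is the project's own optimizer (defined in SGD.py, not
# shown in this commit). As a hedged sketch only, assuming model.param()
# returns (parameter, gradient) pairs, classic SGD with momentum gamma is:
#   v <- gamma * v + lr * g ;  p <- p - v
# (SGD_Optim also takes mini_batch_size; this sketch ignores it.)
class MomentumSGDSketch:
    def __init__(self, model, lr, gamma):
        self.model = model
        self.lr = lr
        self.gamma = gamma
        # one velocity buffer per parameter tensor
        self.v = [torch.zeros_like(p) for p, g in model.param()]

    def zero_grad(self):
        # reset accumulated gradients in place
        for p, g in self.model.param():
            g.zero_()

    def step(self):
        # momentum update followed by a descent step, all in place
        for (p, g), v in zip(self.model.param(), self.v):
            v.mul_(self.gamma).add_(g, alpha=self.lr)
            p.sub_(v)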
def train_model(model, train_input, train_target, criterion, mini_batch_size=100, nb_epochs=100, lr=5e-3, gamma=0.9):
    optimizer = SGD.SGD_Optim(model, mini_batch_size, lr, gamma)
    loss_list = []
    nb_errors = 0
    nb_samples = train_input.size(0)
    for e in range(nb_epochs):
        acc_loss = 0
        nb_errors = 0
        for b in range(0, nb_samples, mini_batch_size):
            ## forward
            output = model.forward(train_input.narrow(0, b, mini_batch_size))
            y = train_target.narrow(0, b, mini_batch_size)
            ## loss
            loss = criterion.forward(output.squeeze(), y)
            acc_loss = acc_loss + loss.item()
            ## zero grad
            optimizer.zero_grad()
            ## gradient of the loss w.r.t. the network output
            g_loss = criterion.backward(output.squeeze(), y)
            ## backward
            model.backward(g_loss)
            ## gradient step
            optimizer.step()
            ## threshold the sigmoid output at 0.5 and count training errors
            output[output <= 0.5] = 0
            output[output > 0.5] = 1
            for k in range(mini_batch_size):
                if y[k] != output[k]:
                    nb_errors = nb_errors + 1
        print(f'acc: {1 - nb_errors / nb_samples}')
        print(e, acc_loss / nb_samples)
        loss_list.append(acc_loss)
    return nb_errors, loss_list
def test_model(model, test_input, test_target, mini_batch_size):
    nb_errors = 0
    for b in range(0, test_input.size(0), mini_batch_size):
        output = model.forward(test_input.narrow(0, b, mini_batch_size))
        # threshold the sigmoid output at 0.5
        output[output <= 0.5] = 0
        output[output > 0.5] = 1
        y = test_target.narrow(0, b, mini_batch_size)
        for k in range(mini_batch_size):
            if y[k] != output[k]:
                nb_errors = nb_errors + 1
    return nb_errors
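# Vectorized alternative to the per-sample loops above (a sketch, not the
# project's API): threshold the sigmoid output at 0.5 and count mismatches
# in a single tensor comparison.
def count_errors(output, target):
    pred = output.squeeze().gt(0.5).long()  # 0/1 predictions
    return pred.ne(target.long()).sum().item()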
# initialize a network from the networks dictionary
model = Network(networks[2], input_size=2, output_size=1, hidden=25)
# train the network
nb_errors, loss_list = train_model(model, train_input, train_target, n.BCE(), mini_batch_size, nb_epochs, lr, gamma)
# test the network
nb_errors_test = test_model(model, test_input, test_target, mini_batch_size)
# print training and testing accuracies
print(f'acc_train = {1-nb_errors/train_input.size(0)}')
print(f'acc_test = {1-nb_errors_test/test_input.size(0)}')