Mira Arabi Haddad / DL Mini Projects Group M / Commits / 92480fd3

Commit 92480fd3, authored Dec 16, 2021 by Mira Arabi Haddad
GROUPWORK_Refactor+added NN for each
parent 99288183

Changes: 1 file, p1/net.py
from os import error
import dlc_practical_prologue as prologue
# import dlc_practical_prologue as prologue
import torch
from torch import nn
from torch.nn import functional as F
...
@@ -8,10 +8,28 @@ from torch.nn.modules.batchnorm import BatchNorm2d
from torch.nn.modules.conv import Conv2d
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.pooling import MaxPool2d
import matplotlib.pyplot as plt
# import matplotlib.pyplot as plt
## Basic Model
class NN(nn.Module):
    def __init__(self, nb_hidden):
        super().__init__()
        self.classifier = nn.Sequential(
            nn.Linear(392, nb_hidden, bias=True),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(nb_hidden),
            nn.Linear(nb_hidden, nb_hidden, bias=True),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(nb_hidden),
            nn.Linear(nb_hidden, 1, bias=True),
            Sigmoid()
        )

    def forward(self, x):
        x = x.view(-1, 392)
        x = self.classifier(x)
        return x
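# Note (editorial sketch, not part of the commit): the 392-unit input of this basic
# model is one image pair flattened into a single vector; judging from the
# view(-1, 392) above and the view(-1, 196) in NN_Classification below, each pair
# appears to be two 14x14 images (2 * 14 * 14 = 392). A minimal shape check under
# that assumption about prologue.generate_pair_sets:
pairs, target, classes, *_ = prologue.generate_pair_sets(100)
print(pairs.shape)                          # assumed: torch.Size([100, 2, 14, 14])
print(pairs.view(-1, 392).shape)            # torch.Size([100, 392]), input of NN
print(pairs[:, 0].reshape(-1, 196).shape)   # torch.Size([100, 196]), per-image input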
class CNN_VGG(nn.Module):
    def __init__(self, nb_hidden):
        super().__init__()
...
@@ -31,15 +49,13 @@ class CNN_VGG(nn.Module):
        )
        self.classifier = nn.Sequential(
-           nn.Linear(576, 288),
+           nn.Linear(576, nb_hidden),
            nn.ReLU(inplace=True),
            ## add batchnorm1d?
            ## we removed MaxPool here
            nn.Dropout(p=0.5),
-           nn.Linear(288, 144),
+           nn.Linear(nb_hidden, nb_hidden),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
-           nn.Linear(144, 1),
+           nn.Linear(nb_hidden, 1),
            Sigmoid()
        )
...
@@ -49,67 +65,27 @@ class CNN_VGG(nn.Module):
        x = self.classifier(x)
        return x
def train_model(model, train_input, train_target, mini_batch_size, nb_epochs=100):
    criterion = nn.BCELoss()
    eta = 1e-1
    loss_list = []
    for e in range(nb_epochs):
        acc_loss = 0
        for b in range(0, train_input.size(0), mini_batch_size):
            output = model(train_input.narrow(0, b, mini_batch_size))
            y = train_target.narrow(0, b, mini_batch_size)
            loss = criterion(output.view(-1), y.float())
            acc_loss = acc_loss + loss.item()
            # note: a fresh Adam optimizer is created for every mini-batch, and a
            # manual SGD update (p -= eta * p.grad) is applied on top of optimizer.step()
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=0)
            # model.zero_grad()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            with torch.no_grad():
                for p in model.parameters():
                    p -= eta * p.grad
        print(e, acc_loss)
        loss_list.append(acc_loss)
    return loss_list
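# Editorial sketch (not the committed code): the loop above rebuilds the Adam
# optimizer inside the mini-batch loop and then also applies a hand-rolled SGD step.
# The conventional pattern builds one optimizer before the epoch loop and lets it
# perform the whole update; the function name below is hypothetical.
def train_model_single_optimizer(model, train_input, train_target, mini_batch_size, nb_epochs=100):
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    loss_list = []
    for e in range(nb_epochs):
        acc_loss = 0
        for b in range(0, train_input.size(0), mini_batch_size):
            output = model(train_input.narrow(0, b, mini_batch_size))
            y = train_target.narrow(0, b, mini_batch_size)
            loss = criterion(output.view(-1), y.float())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            acc_loss += loss.item()
        loss_list.append(acc_loss)
    return loss_list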
def compute_nb_errors(model, input, target, mini_batch_size):
    nb_errors = 0
    for b in range(0, input.size(0), mini_batch_size):
        output = model(input.narrow(0, b, mini_batch_size))
        output_b = (output.view(-1) > 0.5).float()
        for k in range(mini_batch_size):
            if target[b + k] != output_b[k]:
                nb_errors = nb_errors + 1
    return nb_errors
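# Editorial sketch (hypothetical helper, not the committed code): compute_nb_errors
# indexes target[b + k] for a full mini_batch_size at every step, so it implicitly
# assumes mini_batch_size divides input.size(0) exactly, which holds here
# (1000 samples, batches of 100). A variant that tolerates a ragged final batch:
def compute_nb_errors_any_batch(model, input, target, mini_batch_size):
    nb_errors = 0
    with torch.no_grad():
        for b in range(0, input.size(0), mini_batch_size):
            n = min(mini_batch_size, input.size(0) - b)
            output = model(input.narrow(0, b, n))
            pred = (output.view(-1) > 0.5).float()
            nb_errors += int((pred != target.narrow(0, b, n).float()).sum().item())
    return nb_errors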
def compute_nb_errors_test(model, input, target, mini_batch_size, nb_epochs):
    loss_list = []
    criterion = nn.BCELoss()
    nb_errors = 0
    for e in range(nb_epochs):
        acc_loss = 0
        for b in range(0, input.size(0), mini_batch_size):
            output = model(input.narrow(0, b, mini_batch_size))
            output_b = (output.view(-1) > 0.5).float()
            y = target.narrow(0, b, mini_batch_size)
            loss = criterion(output.view(-1), y.float())
            acc_loss = acc_loss + loss.item()
            if (e == (nb_epochs - 1)):
                for k in range(mini_batch_size):
                    if target[b + k] != output_b[k]:
                        nb_errors = nb_errors + 1
        loss_list.append(acc_loss)
    return nb_errors, loss_list
# Problem 1 Part 2:
## 2 Networks: Classifier + Comparer (Weight Sharing)
class NN_Classification(nn.Module):
    def __init__(self, nb_hidden):
        super().__init__()
        self.classifier = nn.Sequential(
            nn.Linear(196, nb_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(nb_hidden),
            nn.Linear(nb_hidden, nb_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(nb_hidden),
            nn.Linear(nb_hidden, 10),
            Sigmoid()
        )

    def forward(self, x1, x2):
        x1 = self.classifier(x1.view(-1, 196))
        x2 = self.classifier(x2.view(-1, 196))
        return torch.cat((x1, x2), 1)


class CNN_Classification(nn.Module):
    def __init__(self, nb_hidden):
        super().__init__()
...
@@ -129,17 +105,15 @@ class CNN_Classification(nn.Module):
        )
        self.classifier = nn.Sequential(
-           nn.Linear(576, 288),
+           nn.Linear(576, nb_hidden),
            nn.ReLU(inplace=True),
            ## add batchnorm1d?
-           nn.BatchNorm1d(288),
            ## we removed MaxPool here
+           nn.BatchNorm1d(nb_hidden),
            nn.Dropout(p=0.5),
-           nn.Linear(288, 144),
+           nn.Linear(nb_hidden, nb_hidden),
            nn.ReLU(inplace=True),
-           nn.BatchNorm1d(144),
+           nn.BatchNorm1d(nb_hidden),
            nn.Dropout(p=0.5),
-           nn.Linear(144, 10),
+           nn.Linear(nb_hidden, 10),
            Softmax(dim=0)
        )
...
@@ -151,128 +125,40 @@ class CNN_Classification(nn.Module):
        x1 = self.classifier(x1)
        x2 = self.classifier(x2)
        return torch.cat((x1, x2), 1)
        # return x1, x2
def train_model_class(model, train_input, train_target, mini_batch_size, nb_epochs=100):
    criterion = nn.CrossEntropyLoss()
    eta = 1e-1
    for e in range(nb_epochs):
        acc_loss = 0
        for b in range(0, train_input.size(0), mini_batch_size):
            output = model(train_input[:, 0].narrow(0, b, mini_batch_size),
                           train_input[:, 1].narrow(0, b, mini_batch_size))
            # y = train_target.narrow(0, b, mini_batch_size)
            y1 = train_target[:, 0].narrow(0, b, mini_batch_size)
            y2 = train_target[:, 1].narrow(0, b, mini_batch_size)
            loss1 = criterion(output[:, 0:10], y1)
            loss2 = criterion(output[:, 10:20], y2)
            loss = loss1 + loss2
            acc_loss = acc_loss + loss.item()
            # add optimizer
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=0)
            # model.zero_grad()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            with torch.no_grad():
                for p in model.parameters():
                    p -= eta * p.grad
        # print(e, acc_loss)
    return output
def compute_nb_errors_class(model, input, target, mini_batch_size):
    nb_errors = 0
    for b in range(0, input.size(0), mini_batch_size):
        output1, output2 = model(input[:, 0].narrow(0, b, mini_batch_size),
                                 input[:, 1].narrow(0, b, mini_batch_size))
        y1 = target[:, 0].narrow(0, b, mini_batch_size)
        y2 = target[:, 1].narrow(0, b, mini_batch_size)
        _, predicted_classes1 = output1.max(1)
        _, predicted_classes2 = output2.max(1)
        for k in range(mini_batch_size):
            if y1[k] != predicted_classes1[k]:
                nb_errors += 1
            if y2[k] != predicted_classes2[k]:
                nb_errors += 1
    return nb_errors
class MLP_Comparer(nn.Module):
    def __init__(self, nb_hidden):
        super().__init__()
        # self.features = nn.Sequential(
        # )
        self.classifier = nn.Sequential(
            # input is concatenation of classification output:
            # tensor of 20 elements (10 classes for each input)
-           nn.Linear(20, 10),
+           nn.Linear(20, nb_hidden),
            nn.ReLU(inplace=True),
-           nn.BatchNorm1d(10),
            ## add batchnorm1d?
            ## we removed MaxPool here
            # nn.Dropout(p=0.25),
-           nn.Linear(10, 5),
+           nn.BatchNorm1d(nb_hidden),
+           nn.Linear(nb_hidden, nb_hidden),
            nn.ReLU(inplace=True),
-           nn.BatchNorm1d(5),
            # nn.Dropout(p=0.25),
-           nn.Linear(5, 1),
+           nn.BatchNorm1d(nb_hidden),
+           nn.Linear(nb_hidden, 1),
            Sigmoid()
        )

    def forward(self, x):
        # x = self.classifier(torch.cat((x1, x2), 1))
        x = self.classifier(x)
        # return torch.cat((x1, x2), 1)
        return x
# def train_model_comp(model, class_input1, class_input2, train_target, mini_batch_size, nb_epochs = 100):
def train_model_comp(model, class_input, train_target, mini_batch_size, nb_epochs=100):
    criterion = nn.BCELoss()
    eta = 1e-1
    for e in range(nb_epochs):
        acc_loss = 0
        for b in range(0, class_input.size(0), mini_batch_size):
            # output = model(class_input1.narrow(0, b, mini_batch_size), class_input2.narrow(0, b, mini_batch_size))
            output = model(class_input.narrow(0, b, mini_batch_size))
            y = train_target.narrow(0, b, mini_batch_size)
            loss = criterion(output.view(-1), y.float())
            acc_loss = acc_loss + loss.item()
            # add optimizer
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=0.9)
            # optimizer.zero_grad()
            # model.zero_grad()
            loss.backward()
            optimizer.step()
            # with torch.no_grad():
            #     for p in model.parameters():
            #         p -= eta * p.grad
        print(e, acc_loss)
def compute_nb_errors_comp(model, input, target, mini_batch_size):
    nb_errors = 0
    for b in range(0, input.size(0), mini_batch_size):
        # output= model(input[:,0:10].narrow(0, b, mini_batch_size), input[:,10:20].narrow(0, b, mini_batch_size))
        output = model(input.narrow(0, b, mini_batch_size))
        y = target.narrow(0, b, mini_batch_size)
        # note: output is the comparer's raw sigmoid value here; unlike
        # compute_nb_errors it is not thresholded at 0.5 before the comparison
        for k in range(mini_batch_size):
            if output[k] != y[k]:
                nb_errors += 1
    return nb_errors
class NN_WS(nn.Module):
    def __init__(self, nb_hidden):
        super().__init__()
        self.classifier = NN_Classification(nb_hidden)
        self.comparer = MLP_Comparer(nb_hidden)

    def forward(self, x):
        x = self.comparer(self.classifier(x[:, 0], x[:, 1]))
        return x
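# Editorial sketch (not part of the commit): NN_WS and CNN_WS realise the weight
# sharing: the same classifier sub-network, hence the same parameters, is applied to
# both images of a pair, and the comparer only sees the concatenated 20-dimensional
# class scores. End-to-end data flow, assuming the (n, 2, 14, 14) input shape used
# elsewhere in this file and that the elided activation imports (Sigmoid) are in place:
ws_model = NN_WS(nb_hidden=64)
dummy_pairs = torch.randn(8, 2, 14, 14)           # fake mini-batch of 8 image pairs
scores = ws_model.classifier(dummy_pairs[:, 0], dummy_pairs[:, 1])
print(scores.shape)                               # (8, 20): 10 class scores per image
print(ws_model.comparer(scores).shape)            # (8, 1): probability that digit1 <= digit2
print(ws_model(dummy_pairs).shape)                # same pipeline in one call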
class CNN_WS(nn.Module):
    def __init__(self, nb_hidden):
...
@@ -286,42 +172,22 @@ class CNN_WS(nn.Module):
        # return torch.cat((x1, x2), 1)
        return x
## NN WS+AL
class NN_WS_AL(nn.Module):
    def __init__(self, nb_hidden):
        super().__init__()
        self.classifier = NN_Classification(nb_hidden)
        self.comparer = MLP_Comparer(nb_hidden)

    def forward(self, x):
        # output of classifier
        out = self.classifier(x[:, 0], x[:, 1])
        # output of comparer (final result)
        res = self.comparer(out)
        # keep both, for Auxiliary Loss
        return out, res


## Basic Model
model1 = CNN_VGG(200)
mini_batch_size = 100

# # target = 0 if x1 > x2, target = 1 if x1 <= x2
train_input, train_target, train_classes, test_input, test_target, test_classes = prologue.generate_pair_sets(1000)

loss_train = train_model(model1, train_input, train_target, mini_batch_size, nb_epochs=19)
errors = compute_nb_errors(model1, train_input, train_target, mini_batch_size)
errors_test, loss_test = compute_nb_errors_test(model1, test_input, test_target, mini_batch_size, nb_epochs=19)

print(f'accuracy of Basic = {100 - (errors / 10)}')
print(f'accuracy of Basic, testing = {100 - (errors_test / 10)}')

plt.plot(loss_train)
plt.plot(loss_test)
plt.legend(['train', 'test'])
plt.show()

model2 = CNN_Classification(200)
# output = train_model_class(model2, train_input, train_classes, mini_batch_size, nb_epochs=100)
# output1, output2 = train_model_class(model2, train_input, train_classes, mini_batch_size, nb_epochs=100)
# output_class = torch.cat((output1, output2), 1).detach()
# print(output_class.size())
# errors = compute_nb_errors_class(model2, train_input, train_classes, mini_batch_size)
# print((1-errors/20000)*100)
# print(output1.size(), output2.size())
# print(torch.cat((output1, output2), 1).size())

model3 = MLP_Comparer(200)
# train_model_comp(model3, output_class, train_target, mini_batch_size, nb_epochs=100)
# error_comp = compute_nb_errors_comp(model3, output_class, train_target, mini_batch_size)
# print(f'accuracy of comparing = {100-(error_comp/10)}')

model_WS = CNN_WS(200)
train_model(model_WS, train_input, train_target, mini_batch_size, nb_epochs=32)
errors_WS = compute_nb_errors(model_WS, train_input, train_target, mini_batch_size)
errors_WS_test = compute_nb_errors(model_WS, test_input, test_target, mini_batch_size)

print(f'accuracy of Weight Sharing = {100 - (errors_WS / 10)}')
print(f'accuracy of Weight Sharing, testing = {100 - (errors_WS_test / 10)}')
## CNN WS + AL
class CNN_WS_AL(nn.Module):
    def __init__(self, nb_hidden):
        super().__init__()
...
@@ -329,59 +195,9 @@ class CNN_WS_AL(nn.Module):
        self.comparer = MLP_Comparer(nb_hidden)

    def forward(self, x):
        # x = self.classifier(torch.cat((x1, x2), 1))
        # output of classifier
        out = self.classifier(x[:, 0], x[:, 1])
        # x = self.comparer(self.classifier(x[:, 0], x[:, 1]))
        # output of comparer (final result)
        res = self.comparer(out)
        # return torch.cat((x1, x2), 1)
        return out, res
def train_model_WSAL(model, train_input, train_target, train_classes, mini_batch_size, nb_epochs=100):
    criterion1 = nn.CrossEntropyLoss()
    criterion2 = nn.BCELoss()
    eta = 1e-1
    for e in range(nb_epochs):
        acc_loss = 0
        for b in range(0, train_input.size(0), mini_batch_size):
            out, res = model(train_input.narrow(0, b, mini_batch_size))
            y1 = train_classes[:, 0].narrow(0, b, mini_batch_size)
            y2 = train_classes[:, 1].narrow(0, b, mini_batch_size)
            loss1 = criterion1(out[:, 0:10], y1)
            loss2 = criterion1(out[:, 10:20], y2)
            y = train_target.narrow(0, b, mini_batch_size)
            loss3 = criterion2(res.view(-1), y.float())
            loss = 0.5 * loss1 + 0.5 * loss2 + 1.0 * loss3
            acc_loss = acc_loss + loss.item()
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=0.9)
            optimizer.zero_grad()
            # model.zero_grad()
            loss.backward()
            optimizer.step()
            with torch.no_grad():
                for p in model.parameters():
                    p -= eta * p.grad
        print(e, acc_loss)
def compute_nb_errors_AL(model, input, target, mini_batch_size):
    nb_errors = 0
    for b in range(0, input.size(0), mini_batch_size):
        _, output = model(input.narrow(0, b, mini_batch_size))
        output_b = (output.view(-1) > 0.5).float()
        for k in range(mini_batch_size):
            if target[b + k] != output_b[k]:
                nb_errors = nb_errors + 1
    return nb_errors
model_WSAL = CNN_WS_AL(100)
train_model_WSAL(model_WSAL, train_input, train_target, train_classes, mini_batch_size, nb_epochs=40)
errors_WSAL = compute_nb_errors_AL(model_WSAL, train_input, train_target, mini_batch_size)
errors_WSAL_test = compute_nb_errors_AL(model_WSAL, test_input, test_target, mini_batch_size)

print(f'accuracy of Weight Sharing +AL= {100 - (errors_WSAL / 10)}')
print(f'accuracy of Weight Sharing, testing +AL= {100 - (errors_WSAL_test / 10)}')
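# Editorial sketch (hypothetical helper, not the committed code): the accuracy prints
# rely on every set having exactly 1000 pairs, so 100 - errors/10 equals
# 100 * (1 - errors/1000). A general form that does not hard-code the set size:
def accuracy_percent(nb_errors, nb_samples):
    return 100.0 * (1.0 - nb_errors / nb_samples)

# accuracy_percent(errors_WSAL_test, test_input.size(0)) reproduces
# 100 - errors_WSAL_test / 10 whenever test_input.size(0) == 1000.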