import torch
import torch.nn.functional as F
from torch import nn

from .blocks import *


class Normalize:
    """Channel-wise normalization: (x - mean) / std.

    Note: despite the name, ``variance`` holds per-channel standard
    deviations (the values passed in below are the usual dataset stds,
    not variances).
    """

    def __init__(self, opt, expected_values, variance):
        self.n_channels = opt.input_channel
        self.expected_values = expected_values
        self.variance = variance
        assert self.n_channels == len(self.expected_values)

    def __call__(self, x):
        x_clone = x.clone()
        for channel in range(self.n_channels):
            x_clone[:, channel] = (x[:, channel] - self.expected_values[channel]) / self.variance[channel]
        return x_clone


class Denormalize:
    """Inverse of ``Normalize``: x * std + mean, channel by channel."""

    def __init__(self, opt, expected_values, variance):
        self.n_channels = opt.input_channel
        self.expected_values = expected_values
        self.variance = variance
        assert self.n_channels == len(self.expected_values)

    def __call__(self, x):
        x_clone = x.clone()
        for channel in range(self.n_channels):
            x_clone[:, channel] = x[:, channel] * self.variance[channel] + self.expected_values[channel]
        return x_clone

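
# A minimal round-trip sanity check (a hedged sketch; the ``opt`` namespace
# below is an assumption standing in for the repo's real option object, which
# only needs an ``input_channel`` attribute here):
#
#     import argparse
#     opt = argparse.Namespace(input_channel=3)
#     mean, std = [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261]
#     x = torch.rand(8, 3, 32, 32)
#     x_rt = Denormalize(opt, mean, std)(Normalize(opt, mean, std)(x))
#     assert torch.allclose(x, x_rt, atol=1e-6)

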
class Normalizer:
    """Selects the dataset-appropriate ``Normalize`` transform; acts as the
    identity for datasets that are not normalized (gtsrb, celeba)."""

    def __init__(self, opt):
        self.normalizer = self._get_normalizer(opt)

    def _get_normalizer(self, opt):
        if opt.dataset == "cifar10":
            normalizer = Normalize(opt, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
        elif opt.dataset == "mnist":
            normalizer = Normalize(opt, [0.5], [0.5])
        elif opt.dataset in ("gtsrb", "celeba"):
            normalizer = None
        elif opt.dataset == "imagenet_sub200":
            normalizer = Normalize(opt, [0.4802, 0.4481, 0.3975], [0.2302, 0.2265, 0.2262])
        else:
            raise ValueError(f"Invalid dataset: {opt.dataset}")
        return normalizer

    def __call__(self, x):
        if self.normalizer:
            x = self.normalizer(x)
        return x


class Denormalizer:
    """Counterpart of ``Normalizer``; selects the matching ``Denormalize``
    transform, or the identity for gtsrb and celeba."""

    def __init__(self, opt):
        self.denormalizer = self._get_denormalizer(opt)

    def _get_denormalizer(self, opt):
        if opt.dataset == "cifar10":
            denormalizer = Denormalize(opt, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
        elif opt.dataset == "mnist":
            denormalizer = Denormalize(opt, [0.5], [0.5])
        elif opt.dataset in ("gtsrb", "celeba"):
            denormalizer = None
        elif opt.dataset == "imagenet_sub200":
            denormalizer = Denormalize(opt, [0.4802, 0.4481, 0.3975], [0.2302, 0.2265, 0.2262])
        else:
            raise ValueError(f"Invalid dataset: {opt.dataset}")
        return denormalizer

    def __call__(self, x):
        if self.denormalizer:
            x = self.denormalizer(x)
        return x

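
# Hedged usage sketch for the selector classes (the ``opt`` fields below are
# assumptions inferred from how this file reads them; the real object is
# constructed elsewhere in the repo):
#
#     import argparse
#     opt = argparse.Namespace(dataset="cifar10", input_channel=3)
#     normalize, denormalize = Normalizer(opt), Denormalizer(opt)
#     x_norm = normalize(torch.rand(1, 3, 32, 32))
#     x_back = denormalize(x_norm)
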
# ---------------------------- Classifiers ----------------------------#
class MNISTBlock(nn.Module):
    """Pre-activation block: BN -> ReLU -> 3x3 conv."""

    def __init__(self, in_planes, planes, stride=1):
        super(MNISTBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.ind = None  # placeholder, presumably set externally (e.g. for channel indexing/pruning)

    def forward(self, x):
        return self.conv1(F.relu(self.bn1(x)))


class NetC_MNIST(nn.Module):
    """Small MNIST classifier. ``forward`` applies the child modules in
    registration order, so the attribute order below matters."""

    def __init__(self):
        super(NetC_MNIST, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, (3, 3), 2, 1)  # 28x28 -> 14x14
        self.relu1 = nn.ReLU(inplace=True)
        self.layer2 = MNISTBlock(32, 64, 2)  # 14x14 -> 7x7
        self.layer3 = MNISTBlock(64, 64, 2)  # 7x7 -> 4x4
        self.flatten = nn.Flatten()
        self.linear6 = nn.Linear(64 * 4 * 4, 512)
        self.relu7 = nn.ReLU(inplace=True)
        self.dropout8 = nn.Dropout(0.3)
        self.linear9 = nn.Linear(512, 10)

    def forward(self, x):
        for module in self.children():
            x = module(x)
        return x
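

if __name__ == "__main__":
    # Smoke test (a sketch, not part of the original file): a dummy MNIST-sized
    # batch should yield 10 logits per image. Because of the relative
    # ``from .blocks import *`` above, run this as a module from the package
    # root (``python -m <package>.models``) rather than as a bare script.
    net = NetC_MNIST()
    logits = net(torch.rand(2, 1, 28, 28))
    assert logits.shape == (2, 10)
    print(logits.shape)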