TorchModels.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class MNistNet(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel (MNIST) images in NHWC layout."""

    def __init__(self, **args):
        super(MNistNet, self).__init__()
        ks = int(args.get('kernel_size', 5))
        do = float(args.get('dropout', 0.5))
        dense = int(args.get('dense', 50))
        self.conv1 = nn.Conv2d(1, 10, kernel_size=ks)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=ks)
        self.conv2_drop = nn.Dropout2d(do)
        # 320 = 20 channels * 4 * 4 spatial, which assumes 28x28 inputs and kernel_size=5.
        self.fc1 = nn.Linear(320, dense)
        self.fc2 = nn.Linear(dense, 10)

    def forward(self, x):
        # Convert NHWC input to the NCHW layout expected by nn.Conv2d.
        x = x.permute(0, 3, 1, 2).float()
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        # Return raw logits; apply F.log_softmax or nn.CrossEntropyLoss in the loss instead.
        return self.fc2(x)
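
# A minimal smoke test for MNistNet (an illustrative sketch, not part of the
# original pipeline). The batch size and the 28x28x1 NHWC dummy shape are
# assumptions based on MNIST and the permute in forward().
if __name__ == '__main__':
    _x = torch.zeros(4, 28, 28, 1)   # dummy NHWC batch
    _logits = MNistNet()(_x)         # forward pass with default hyperparameters
    assert _logits.shape == (4, 10), _logits.shape
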
### Build a customized CNN from the given hyperparameters
class _ConvBlock(nn.Sequential):
    """Stack of 3x3 conv+ReLU layers, closed by a stride-2 conv down to one channel."""

    def __init__(self, conv_layers, dropout, in_ch=5):
        super().__init__()
        channel_out = in_ch  # fallback so the final conv is valid when conv_layers == 0
        for i in range(conv_layers):
            # Channel widths cycle through multiples of in_ch (mod 5), clamped to >= 1.
            channel_in = in_ch * ((i + 1) % 5)
            channel_out = in_ch * ((i + 2) % 5)
            if channel_in == 0:
                channel_in += 1
            if channel_out == 0:
                channel_out += 1
            self.add_module('convlayer%d' % i,
                            nn.Conv2d(channel_in, out_channels=channel_out,
                                      kernel_size=(3, 3), stride=1, padding=1))
            self.add_module('relu%d' % i, nn.ReLU(inplace=True))
        self.add_module('convlayer%d' % conv_layers,
                        nn.Conv2d(channel_out, out_channels=1,
                                  kernel_size=(3, 3), stride=2, padding=1))
        self.dropout = dropout

    def forward(self, x):
        x = super().forward(x)
        if self.dropout > 0:
            x = F.dropout(x, p=self.dropout, training=self.training)
        return x
class _DenseBlock(nn.Sequential):
    """Stack of Linear+ReLU layers that halves the width at each step."""

    def __init__(self, dense_layers, dropout, base):
        super().__init__()
        for i in range(dense_layers):
            il = int(base // (2 ** i))
            ol = int(base // (2 ** (i + 1)))
            print(il, "=>", ol)
            self.add_module('denselayer%d' % i, nn.Linear(il, ol))
            self.add_module('relu%d' % i, nn.ReLU(inplace=True))
        self.dropout = dropout

    def forward(self, x):
        x = super().forward(x)
        if self.dropout > 0:
            x = F.dropout(x, p=self.dropout, training=self.training)
        return x
class CNN(nn.Module):
    """Configurable CNN: _ConvBlock -> adaptive max-pool -> _DenseBlock -> linear classifier."""

    def __init__(self, conv_layers=2, dense_layers=2, dropout=0.5, classes=3, in_channels=5):
        super().__init__()
        self.build_net(conv_layers, dense_layers, dropout, classes, in_channels)

    def build_net(self, conv_layers, dense_layers, dropout, classes, in_channels):
        base_2 = 10
        base = base_2 ** 2  # flattened size: the single pooled channel at 10x10
        self.conv_layers = _ConvBlock(conv_layers, dropout, in_channels)
        self.dense_layers = _DenseBlock(dense_layers, dropout, base)
        self.adapt_pool = nn.AdaptiveMaxPool2d((base_2, base_2))
        il = int(base // (2 ** dense_layers))
        ol = int(classes)
        print(il, "=>", ol)
        self.output = nn.Linear(il, ol)

    def forward(self, x):
        # Convert NHWC input to the NCHW layout expected by nn.Conv2d.
        x = x.permute(0, 3, 1, 2).float()
        x = self.conv_layers(x)
        x = self.adapt_pool(x)
        x = x.view(x.shape[0], -1)  # flatten
        x = self.dense_layers(x)
        return self.output(x)
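
# A minimal usage sketch for CNN (illustrative; the batch size and 32x32
# spatial size are assumptions). Any spatial size works because the model
# ends in an adaptive pool, but the last input dim must equal in_channels.
if __name__ == '__main__':
    _x = torch.zeros(2, 32, 32, 5)  # dummy NHWC batch with 5 channels
    _out = CNN(conv_layers=2, dense_layers=2, dropout=0.5, classes=3, in_channels=5)(_x)
    assert _out.shape == (2, 3), _out.shape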