# modules.py
import torch
import torch.nn as nn
import torch.nn.functional as F

class BMV(nn.Module):
    """Batched matrix-vector product: (B, M, N) @ (B, N) -> (B, M)."""

    def forward(self, x, y, **kwargs):
        # Lift y to (B, N, 1), multiply, then drop the trailing singleton dim.
        return torch.matmul(x, y.unsqueeze(2)).squeeze(2)

class OuterProduct(nn.Module):
    """Batched outer product: (B, M) x (B, N) -> (B, M, N)."""

    def forward(self, x, y, **kwargs):
        return x.unsqueeze(2) * y.unsqueeze(1)

class AffineLocator(nn.Module):
    """Crops a square glimpse from x with an affine spatial transformer.

    theta is a flat batch of 2x3 affine matrices; the output is a
    (B, C, glimpse_size, glimpse_size) resampling of x.
    """

    def __init__(self, glimpse_size=22):
        super().__init__()
        self.glimpse_size = glimpse_size

    def forward(self, theta, x):
        theta = theta.view(x.size(0), 2, 3)
        # align_corners=False is the PyTorch >= 1.3 default; passing it
        # explicitly silences the deprecation warning without changing behavior.
        grid = F.affine_grid(
            theta,
            torch.Size((x.size(0), x.size(1), self.glimpse_size, self.glimpse_size)),
            align_corners=False,
        )
        return F.grid_sample(x, grid, align_corners=False)

class AffineEmitter(nn.Module):
    """Two-layer MLP that predicts affine transform parameters.

    The output layer is zero-initialized with an identity-transform bias
    (optionally a second identity for the inverse transform), the standard
    initialization for spatial transformer localization networks.
    """

    def __init__(self, size, output_inverse=False, dropout=0.3):
        super().__init__()
        self.fc1 = nn.Linear(size, size // 2)
        if output_inverse:
            # Six parameters for the forward transform, six for its inverse.
            self.fc3 = nn.Linear(size // 2, 12)
            self.fc3.weight.data.fill_(0)
            self.fc3.bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0])
        else:
            self.fc3 = nn.Linear(size // 2, 6)
            self.fc3.weight.data.fill_(0)
            self.fc3.bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0])
        self.drop = nn.Dropout(p=dropout)
        self.nl = nn.ReLU()

    def forward(self, x):
        x = self.nl(self.drop(self.fc1(x)))
        return self.fc3(x)

class LSTM(nn.Module):
    """Thin wrapper around nn.LSTMCell.

    Returns the new hidden state twice, keeping an (output, h, c)
    interface: the hidden state doubles as the cell's output.
    """

    def __init__(self, size_in, size_out):
        super().__init__()
        self.rnn = nn.LSTMCell(size_in, size_out)

    def forward(self, x, h, c):
        # LSTMCell returns the new hidden and cell states.
        h, c = self.rnn(x, (h, c))
        return h, h, c
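
# A minimal smoke-test sketch of how these modules compose into a spatial
# transformer glimpse pipeline. The batch size, feature width, and image
# shape below are illustrative assumptions, not values from this repository.
if __name__ == "__main__":
    B, feat = 4, 64
    images = torch.randn(B, 1, 60, 60)
    features = torch.randn(B, feat)

    emitter = AffineEmitter(size=feat)        # predicts 6 affine params
    locator = AffineLocator(glimpse_size=22)  # samples a 22x22 glimpse
    rnn = LSTM(size_in=feat, size_out=feat)

    theta = emitter(features)                 # (B, 6); identity at init
    glimpse = locator(theta, images)          # (B, 1, 22, 22)

    h = torch.zeros(B, feat)
    c = torch.zeros(B, feat)
    out, h, c = rnn(features, h, c)           # out is the new hidden state

    # Batched matrix-vector and outer products.
    mv = BMV()(torch.randn(B, 5, feat), features)    # (B, 5)
    outer = OuterProduct()(features, features)       # (B, feat, feat)
    print(glimpse.shape, out.shape, mv.shape, outer.shape)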