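"""Self-taught learning baseline for CIFAR-10.

Fits a single linear layer on latent features from a pre-trained CifarVAE
(see cifar_baseline_vae.py). The encoder and mu head are frozen, so only
the classifier is trained.
"""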
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchbearer
import torchvision
from torchbearer import Trial, callbacks
from torchvision import transforms

from cifar_baseline_vae import CifarVAE


class SelfTaught(nn.Module):
    """Linear classifier over the mean (mu) latents of a pre-trained VAE encoder."""

    def __init__(self, encoder, mu, size):
        super(SelfTaught, self).__init__()
        self.encoder = encoder
        self.mu = mu
        self.classifier = nn.Sequential(
            nn.Linear(size, 10)  # 10 CIFAR-10 classes
        )

    def forward(self, x, state=None):  # state is supplied by torchbearer when pass_state=True
        features = self.encoder(x)
        # dim=1 made explicit: calling log_softmax without a dim is deprecated.
        return F.log_softmax(self.classifier(self.mu(features)), dim=1)


def evaluate(file, device='cuda'):
    transform_test = transforms.Compose([
        transforms.ToTensor()
    ])

    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=10)

    base_dir = 'cifarss_base'

    model = CifarVAE()
    model = SelfTaught(model.encoder, model.mu, 32)  # 32-dimensional latent space

    # Evaluation only: Trial requires an optimizer, but lr=0 means it never updates anything.
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0)

    # Restore the saved trial (including model weights) from the checkpoint and evaluate.
    trial = (Trial(model, optimizer, nn.NLLLoss(), ['acc', 'loss'])
             .load_state_dict(torch.load(os.path.join(base_dir, file)), resume=False)
             .with_generators(val_generator=testloader)
             .to(device))
    return trial.evaluate()


def run(file, device='cuda'):
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(0.25, 0.25, 0.25, 0.25),
        transforms.ToTensor()
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor()
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=10)

    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=10)

    base_dir = 'cifarss_base'
    os.makedirs(base_dir, exist_ok=True)  # checkpoint directory for MostRecent

    # Load the pre-trained VAE weights, then reuse its encoder and mu head.
    model = CifarVAE()
    model.load_state_dict(torch.load(file)[torchbearer.MODEL])
    model = SelfTaught(model.encoder, model.mu, 32)

    # Freeze the pre-trained parts: only the linear classifier receives gradients.
    for param in model.encoder.parameters():
        param.requires_grad = False
    for param in model.mu.parameters():
        param.requires_grad = False

    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)

    trial = Trial(model, optimizer, nn.NLLLoss(), ['acc', 'loss'], pass_state=True, callbacks=[
        callbacks.MultiStepLR([25, 40, 45]),  # decay the learning rate at epochs 25, 40 and 45
        callbacks.MostRecent(os.path.join(base_dir, '{epoch:02d}.pt')),  # save a checkpoint each epoch
        callbacks.GradientClipping(5)
    ]).with_generators(train_generator=trainloader, val_generator=testloader).for_val_steps(5).to(device)

    trial.run(50)


if __name__ == "__main__":
    # Train the classifier from the pre-trained VAE checkpoint, then evaluate the final epoch.
    run('cifar_vae/iter_0.99.pt')
    print(evaluate('49.pt'))