# single.py (forked from yzs981130/cuda-wrapper)
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Training settings
batch_size = 1024  # samples per synthetic batch

class Net(nn.Module):
    """Minimal model: one linear layer over the flattened image."""

    def __init__(self):
        super(Net, self).__init__()
        # 3 x 244 x 244 input, flattened, mapped to 10 class logits
        self.fc = nn.Linear(244 * 244 * 3, 10)

    def forward(self, x):
        return self.fc(x.view(x.size(0), -1))

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

def train(epoch):
    loss100 = 0.0  # running loss over the current reporting window
    start = time.time()
    # Effectively endless loop: the script exists to keep the GPU busy.
    for i in range(100_000_000):
        # Synthetic batch: random images and random labels in [0, 10)
        # (torch.randint's upper bound is exclusive).
        inputs = torch.randn(batch_size, 3, 244, 244).to(device)
        target = torch.randint(0, 10, (batch_size,), dtype=torch.long).to(device)
        optimizer.zero_grad()
        output = model(inputs)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        loss100 += loss.item()
        if (i + 1) % 100 == 0:  # report the mean loss every 100 batches
            end = time.time()
            print('[Epoch %d, Batch %5d] loss: %.3f time: %.3fs' %
                  (epoch + 1, i + 1, loss100 / 100, end - start))
            loss100 = 0.0
            start = time.time()

# torch.save(model, '/home/aistudio/mnist_net.pkl')
train(0)
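
# A minimal usage sketch (assumed, not part of the original file): run the
# script directly to generate sustained load on one GPU, optionally pinning
# it to a specific device with the standard CUDA_VISIBLE_DEVICES variable:
#
#   python single.py
#   CUDA_VISIBLE_DEVICES=0 python single.py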