/
linreg.py
67 lines (52 loc) · 1.7 KB
/
linreg.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
from itertools import count
import torch
import torch.autograd
import torch.nn.functional as F
from torch.autograd import Variable
class Polynom(object):
    """A random polynomial y = sum_i W[i] * x^(i+1) + b, used as a regression
    target. Coefficients and bias are drawn from N(0, 25)."""

    def __init__(self, degree):
        # degree: the highest power of x in the polynomial.
        self.degree = degree
        self.W = torch.randn(degree, 1) * 5
        self.b = torch.randn(1) * 5

    @classmethod
    def show(cls,
             W: torch.FloatTensor,
             b: torch.FloatTensor):
        """Return a human-readable description of the polynomial.

        W[i] is the coefficient of x^(i+1) (matching the feature order built
        by get_batch), so terms are printed from the highest power down.
        """
        desc = 'y ='
        W = W.view(-1)
        n = len(W)
        # BUG FIX: the original printed W[i] as the coefficient of x^(n - i),
        # but get_batch computes y with W[i] multiplying x^(i + 1) — the
        # displayed polynomial did not match the computed function. Iterate
        # the coefficients in reverse so exponents pair up correctly.
        for i in range(n - 1, -1, -1):
            desc += ' {:+.2f} x^{}'.format(float(W[i]), i + 1)
        desc += ' {:+.2f}'.format(float(b[0]))
        return desc

    def __str__(self):
        return Polynom.show(self.W, self.b)

    def get_batch(self, batch_size=32):
        """Sample a random batch.

        Returns (x, y): x has shape (batch_size, degree) with columns
        [x, x^2, ..., x^degree]; y = x @ W + b with shape (batch_size, 1).
        """
        x_rand = torch.randn(batch_size).unsqueeze(1)
        x = torch.cat([
            x_rand ** i for i in range(1, self.degree + 1)
        ], 1)
        y = x.mm(self.W) + self.b[0]  # b[0]: add the bias as a scalar
        # NOTE: the deprecated Variable(...) wrapper was dropped — since
        # PyTorch 0.4 it is a no-op returning the tensor unchanged.
        return x, y
# Learning target: a random degree-4 polynomial.
poly = Polynom(degree=4)
# The model: a single linear layer over the polynomial features
# [x, x^2, x^3, x^4], so its weights should converge to poly.W / poly.b.
nnet = torch.nn.Linear(poly.degree, 1)

# Train with plain SGD on smooth-L1 loss until the loss drops below 1e-3.
print('------- TRAINING ---------')
for batch_idx in count(1):
    batch_x, batch_y = poly.get_batch(64)
    nnet.zero_grad()
    output = F.smooth_l1_loss(nnet(batch_x), batch_y)
    output.backward()
    # BUG FIX: `output.data[0]` raises IndexError on the 0-dim loss tensor
    # in PyTorch >= 0.5; .item() is the supported way to read the scalar.
    batch_loss = output.item()
    # Manual SGD step, learning rate 0.003.
    for param in nnet.parameters():
        param.data.add_(-0.003 * param.grad.data)
    if 0 == batch_idx % 100:
        print('batch', batch_idx, 'loss', batch_loss)
    if batch_loss < 1e-3:
        break

print('------- RESULT ---------')
print('==> Learned function: {}'.format(
    Polynom.show(nnet.weight.data, nnet.bias.data)))
print('==> Actual function: {}'.format(poly))