class Neuron(object):
    """output = relu(a*x + b*y + c)"""

    def __init__(self, a, b, c, x, y):
        self.a = Variable(a)
        self.b = Variable(b)
        self.c = Variable(c)
        self.x = Variable(x)
        self.y = Variable(y)
        self.output = F.relu(self.a * self.x + self.b * self.y + self.c)

    def math(self, a, b, c, x, y):
        """Plain-float reference implementation of the neuron."""
        n = a * x + b * y + c
        return max(n, 0.0)  # relu

    def math_grad(self, a, b, c, x, y, h=0.001):
        """Numeric gradients via forward differences: (f(p + h) - f(p)) / h."""
        t = self.math(a, b, c, x, y)
        a_grad = (self.math(a + h, b, c, x, y) - t) / h
        b_grad = (self.math(a, b + h, c, x, y) - t) / h
        c_grad = (self.math(a, b, c + h, x, y) - t) / h
        x_grad = (self.math(a, b, c, x + h, y) - t) / h
        y_grad = (self.math(a, b, c, x, y + h) - t) / h
        return a_grad, b_grad, c_grad, x_grad, y_grad

    def grad(self):
        return (self.a.grad, self.b.grad, self.c.grad,
                self.x.grad, self.y.grad)

    def forward(self):
        self.output.forward()
        return self.output.value

    def backward(self, grad=1.0):
        self.output.backward(grad)

    def zero_grad(self):
        self.output.zero_grad(backprop=True)

    def grad_descent(self, lr=0.01):
        # One gradient-descent step: shift each node's value by -lr * grad.
        self.a.set_value(-lr * self.a.grad, True)
        self.b.set_value(-lr * self.b.grad, True)
        self.c.set_value(-lr * self.c.grad, True)
        self.x.set_value(-lr * self.x.grad, True)
        self.y.set_value(-lr * self.y.grad, True)
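# --- Usage sketch (not part of the class) ---
# A minimal, hedged example of how the Neuron above can be exercised,
# assuming `Variable` and `F` come from this project's autograd module;
# the parameter values are arbitrary. It checks the backprop gradients
# against the finite-difference gradients from `math_grad`.
def check_neuron_gradients():
    n = Neuron(a=1.0, b=-2.0, c=0.5, x=3.0, y=-1.5)  # relu input is 6.5 > 0
    n.forward()
    n.zero_grad()
    n.backward()  # seed d(output)/d(output) = 1.0
    for g, g_ in zip(n.grad(), n.math_grad(1.0, -2.0, 0.5, 3.0, -1.5)):
        assert abs(g - g_) < 1e-4, (g, g_)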
class Neuron(object):
    def __init__(self, in_dim):
        self.in_dim = in_dim
        self.bias = Variable()
        self.weights = [Variable() for _ in range(self.in_dim)]

    def init_params(self):
        """Init parameters with standard gaussian distribution."""
        # (requires `import random` at module top)
        for w in self.weights:
            w.set_value(random.gauss(0, 1), False)
        self.bias.set_value(random.gauss(0, 1), False)

    def params(self):
        return self.weights + [self.bias]

    def forward(self, inputs):
        assert len(inputs) == self.in_dim
        output = sum([v * w for v, w in zip(inputs, self.weights)])
        return F.relu(output + self.bias)
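# A hedged usage sketch for the vector Neuron above: inputs are wrapped as
# `Variable`s (matching how the network's forward() below wraps raw values),
# and the relu node returned by forward() drives the usual
# forward/zero_grad/backward cycle.
def demo_neuron(values=(0.5, -1.0, 2.0)):
    neuron = Neuron(in_dim=len(values))
    neuron.init_params()
    out = neuron.forward([Variable(v) for v in values])  # builds relu(w.x + b)
    out.forward()                                        # evaluate the graph
    out.zero_grad()
    out.backward()                                       # grads land on params
    return out.value, [p.grad for p in neuron.params()]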
    def forward(self, inputs):
        """`inputs` is a list of the values of a sample."""
        assert len(inputs) == self.sizes[0]
        inputs = [Variable(value) for value in inputs]
        for layer in self.layers:
            inputs = layer.forward(inputs)
        self.outputs = inputs
        for output in self.outputs:
            output.forward()
        return self
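# A hedged sketch of driving this forward() (the enclosing class is not shown
# here; the name `Network` and sizes=[2, 4, 1] are hypothetical placeholders
# for a net whose `layers` are built from the Neuron above):
#
#     net = Network(sizes=[2, 4, 1])
#     net.forward([0.5, -1.0])   # wraps the raw values in Variables
#     print([o.value for o in net.outputs])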
class TestVariableOperator(unittest.TestCase):
    def setUp(self):
        self.v0_ = -3.0
        self.v1_ = 2.0
        self.v2_ = 4.0
        self.v0 = Variable(self.v0_)
        self.v1 = Variable(self.v1_)
        self.v2 = Variable(self.v2_)

    def tearDown(self):
        pass

    def test_add(self):
        v = self.v0 + self.v1
        v_ = self.v0_ + self.v1_
        v0_grad_, v1_grad_ = 1.0, 1.0
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient buffer
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v0.grad, v0_grad_)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v0.grad, 3.0 * v0_grad_)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)
        # add with const
        v = self.v0 + 1
        v_ = self.v0_ + 1
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        v = 1 + self.v0
        v_ = 1 + self.v0_
        v.forward()
        self.assertAlmostEqual(v.value, v_)

    def test_sub(self):
        v = self.v0 - self.v1
        v_ = self.v0_ - self.v1_
        v0_grad_, v1_grad_ = 1.0, -1.0
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient buffer
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v0.grad, v0_grad_)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v0.grad, 3.0 * v0_grad_)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)
        # subtract with const
        v = self.v0 - 1
        v_ = self.v0_ - 1
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        v = 1 - self.v0
        v_ = 1 - self.v0_
        v.forward()
        self.assertAlmostEqual(v.value, v_)

    def test_mul(self):
        v = self.v0 * self.v1
        v_ = self.v0_ * self.v1_
        v0_grad_, v1_grad_ = self.v1_, self.v0_
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient buffer
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v0.grad, v0_grad_)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v0.grad, 3.0 * v0_grad_)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)
        # multiply with const
        v = self.v0 * 2
        v_ = self.v0_ * 2
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        v = 2 * self.v0
        v_ = 2 * self.v0_
        v.forward()
        self.assertAlmostEqual(v.value, v_)

    def test_div(self):
        v = self.v0 / self.v1
        v_ = self.v0_ / self.v1_
        v0_grad_ = 1 / self.v1_
        v1_grad_ = -self.v0_ / (self.v1_ ** 2)
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient buffer
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v0.grad, v0_grad_)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v0.grad, 3.0 * v0_grad_)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)
        # divide with const
        v = self.v0 / 2
        v_ = self.v0_ / 2
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        v = 2 / self.v0
        v_ = 2 / self.v0_
        v.forward()
        self.assertAlmostEqual(v.value, v_)

    def test_chain_mul(self):
        v = self.v1 * self.v1 * self.v1
        v_ = self.v1_ * self.v1_ * self.v1_
        v1_square = self.v1_ * self.v1_
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v1.grad, 3 * v1_square)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        # not 3 * (3 * v1_square): the grad on the intermediate v1*v1 node
        # also accumulates, so the second backward compounds to 11 * v1_square
        self.assertAlmostEqual(self.v1.grad, 11 * v1_square)
        # multiply const
        v = 2 * self.v1 * 3
        v_ = 2 * self.v1_ * 3
        v.forward()
        self.assertAlmostEqual(v.value, v_)

    def test_iadd(self):
        self.v0 += self.v1 + 3
        self.v0.forward()
        v0_ = self.v0_ + self.v1_ + 3
        self.assertAlmostEqual(self.v0.value, v0_)

    def test_isub(self):
        self.v0 -= self.v1 + 3
        self.v0.forward()
        v0_ = self.v0_ - self.v1_ - 3
        self.assertAlmostEqual(self.v0.value, v0_)

    def test_imul(self):
        self.v0 *= self.v1 * 3
        self.v0.forward()
        v0_ = self.v0_ * self.v1_ * 3
        self.assertAlmostEqual(self.v0.value, v0_)

    def test_idiv(self):
        self.v0 /= self.v1 * 3
        self.v0.forward()
        v0_ = self.v0_ / self.v1_ / 3
        self.assertAlmostEqual(self.v0.value, v0_)

    def test_max_f(self):
        v = F.max(self.v1, self.v2)
        v_ = max(self.v1_, self.v2_)
        if self.v1_ > self.v2_:
            v1_grad_ = 1.0
            v2_grad_ = 0.0
        else:
            v1_grad_ = 0.0
            v2_grad_ = 1.0
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        self.assertAlmostEqual(self.v2.grad, v2_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)
        self.assertAlmostEqual(self.v2.grad, 3.0 * v2_grad_)

    def test_min_f(self):  # was a duplicate `test_max_f`, which shadowed the test above
        v = F.min(self.v1, self.v2)
        v_ = min(self.v1_, self.v2_)
        if self.v1_ < self.v2_:
            v1_grad_ = 1.0
            v2_grad_ = 0.0
        else:
            v1_grad_ = 0.0
            v2_grad_ = 1.0
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        self.assertAlmostEqual(self.v2.grad, v2_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)
        self.assertAlmostEqual(self.v2.grad, 3.0 * v2_grad_)

    def test_square_f(self):
        v = F.square(self.v1)
        v_ = math.pow(self.v1_, 2)
        v1_grad_ = 2 * self.v1_
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)

    def test_pow_f(self):
        v = F.pow(self.v1, 3)
        v_ = math.pow(self.v1_, 3)
        v1_grad_ = 3 * (self.v1_ ** 2)
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)

    def test_exp_f(self):
        v = F.exp(self.v1)
        v_ = math.exp(self.v1_)
        v1_grad_ = math.exp(self.v1_)
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)

    def test_sigmoid_f(self):
        v = F.sigmoid(self.v1)
        v_ = 1 / (1 + math.exp(-self.v1_))
        v1_grad_ = math.exp(-self.v1_) / ((1 + math.exp(-self.v1_)) ** 2)
        self.assertIsNone(v.value)
        v.forward()
        self.assertAlmostEqual(v.value, v_)
        # clear gradient
        v.zero_grad()
        v.backward()
        self.assertEqual(v.grad, 1.0)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)
        # accumulate gradient
        v.backward()
        self.assertEqual(v.grad, 2.0)
        self.assertAlmostEqual(self.v1.grad, 3.0 * v1_grad_)

    def test_relu_f(self):
        a = F.relu(self.v0)
        b = F.relu(self.v1)
        v0_, v1_ = max(self.v0_, 0.0), max(self.v1_, 0.0)
        v0_grad_ = 1.0 if self.v0_ > 0.0 else 0.0
        v1_grad_ = 1.0 if self.v1_ > 0.0 else 0.0
        self.assertIsNone(a.value)
        self.assertIsNone(b.value)
        a.forward()
        b.forward()
        self.assertAlmostEqual(a.value, v0_)
        self.assertAlmostEqual(b.value, v1_)
        # clear gradient
        a.zero_grad()
        a.backward()
        self.assertEqual(a.grad, 1.0)
        self.assertAlmostEqual(self.v0.grad, v0_grad_)
        b.zero_grad()
        b.backward()
        self.assertEqual(b.grad, 1.0)
        self.assertAlmostEqual(self.v1.grad, v1_grad_)

    def test_set_value(self):
        v = self.v0 + self.v1
        vv = v * self.v2
        vv.forward()
        v_ = self.v0_ + self.v1_
        vv_ = v_ * self.v2_
        self.assertAlmostEqual(v.value, v_)
        self.assertAlmostEqual(vv.value, vv_)
        prev_vv = vv.value
        # set_value on an intermediate node is overridden by forward()
        v.set_value(4, True)
        vv.forward()
        self.assertAlmostEqual(vv.value, prev_vv)
        # set_value on a leaf node is NOT overridden by forward()
        self.v0.set_value(100, True)
        vv.forward()
        self.assertNotAlmostEqual(vv.value, prev_vv)

    def test_positive(self):
        self.v0.set_value(-3.0, False)
        v = +self.v0
        v.forward()
        self.assertAlmostEqual(v.value, -3.0)

    def test_negative(self):
        self.v0.set_value(-3.0, False)
        v = -self.v0
        v.forward()
        self.assertAlmostEqual(v.value, 3.0)

    def test_absolute(self):
        self.v0.set_value(-3.0, False)
        v = abs(self.v0)
        v.forward()
        self.assertAlmostEqual(v.value, 3.0)
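# Standard unittest entry point, assuming this test class lives in its own
# module with `import unittest`, `import math`, and the project's Variable/F
# imports at the top; run with `python -m unittest` or execute the file directly.
if __name__ == '__main__':
    unittest.main()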