Example #1
import numpy as np

from chainer0 import Function, Variable
from chainer0.functions.basic_math import Add, Mul

x = Variable(np.array([2.0]))
y = x**2 + x + 1.0

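# Backprop while also recording the backward computation itself
# (enable_double_backprop), so gradients of the gradient can be taken below.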
y.backward(enable_double_backprop=True)
dx = x.grad_var

print('y', y.data)
print('dx', x.grad)
assert y.data == 7.
assert x.grad == 5.

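# Clear the first-order gradient, then backprop through dx (= dy/dx)
# to obtain the second derivative.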
x.cleargrad()
dx.backward()
print('ddx', x.grad)
assert x.grad == 2.

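# Grab the new gradient Variable and differentiate once more for the
# third derivative.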
dx = x.grad_var
x.cleargrad()
dx.backward()
print('dddx', x.grad)
assert x.grad == 0.
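
For reference, the asserted values follow directly from y = x**2 + x + 1 at x = 2.0: y = 7, dy/dx = 2x + 1 = 5, d2y/dx2 = 2, and d3y/dx3 = 0.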
Example #2
                   [0.74, -2.49, 1.39]]))
#targets = Variable(np.array([[True], [True], [False], [True]]))
#weights = Variable(np.array([[0.0], [0.0], [0.0]]))

targets = Variable(np.array([True, True, False, True]))
weights = Variable(np.array([0.0, 0.0, 0.0]))



# Gradients of the training loss are obtained with loss.backward() in the loop
# below; no separate Autograd-style gradient function is needed here.

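# NOTE: this excerpt is truncated -- only the last row of the 4x3 `inputs` array
# is visible above, and the definition of training_loss is not shown. As a
# hedged reference only (not the chainer0 definition), the quantity being
# minimized looks like the usual logistic-regression negative log-likelihood:
# with the weights starting at zero every prediction is 0.5, so the initial loss
# is 4 * ln(2) ~= 2.7726, which is what the assertion below checks. A plain-NumPy
# sketch, under a hypothetical name to avoid clashing with the real definition:
def training_loss_np(weights, inputs, targets):
    # Probability of each label being True under the logistic model.
    preds = 1.0 / (1.0 + np.exp(-inputs.dot(weights)))
    # Cast boolean labels to floats so that 1 - t is valid array arithmetic.
    t = targets.astype(float)
    # Probability assigned to each observed label, then negative log-likelihood.
    label_probabilities = preds * t + (1 - preds) * (1 - t)
    return -np.sum(np.log(label_probabilities))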
print("Initial loss:", training_loss(weights).data)
assert training_loss(weights).data == 2.772588722239781

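# 100 steps of plain gradient descent (learning rate 0.01); updating weights.data
# in place keeps the parameter update out of the autodiff graph.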
for i in range(100):
    weights.cleargrad()
    loss = training_loss(weights)
    loss.backward()
    #weights -= weights.grad * 0.01
    weights.data -= weights.grad * 0.01

print("Trained loss:", training_loss(weights).data)
assert training_loss(weights).data == 0.38900754315581143

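# The string literal below preserves the original Autograd version of the same
# example for comparison.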
'''
training_gradient_fun = grad(training_loss)

# Optimize weights using gradient descent.

print("Initial loss:", training_loss(weights))
for i in range(100):