Example #1
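The loop below is the mini-batch SGD training loop for a two-layer net on MNIST, in the style of "Deep Learning from Scratch". It relies on setup that is not shown in the snippet; here is a minimal sketch of that setup, assuming the book's load_mnist loader, a TwoLayerNet class, and typical hyperparameter values (all of these are assumptions, not confirmed by the original):

import time
import numpy as np
from dataset.mnist import load_mnist    # assumed: the book's MNIST loader
from two_layer_net import TwoLayerNet   # assumed: the book's two-layer network

# Flattened, normalized images with one-hot labels.
(x_train, y_train), (x_test, y_test) = \
    load_mnist(normalize=True, one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

iters_num = 10000                        # assumed iteration budget
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
iter_per_epoch = max(train_size / batch_size, 1)

train_loss_list, train_acc_list, test_acc_list = [], [], []
global_start_time = time.time()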
for i in range(iters_num):
    # Draw a random mini-batch from the training set.
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = y_train[batch_mask]

    # Compute the gradient of the loss with respect to each parameter
    grad = network.gradient(x_batch, t_batch)

    # Update the parameters with plain SGD
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    # Record the loss on the current mini-batch.
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # Evaluate accuracy on the full train/test sets once per epoch.
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, y_train)
        test_acc = network.accuracy(x_test, y_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("i=" + str(i) + ", train acc, test acc | " + str(train_acc) +
              ", " + str(test_acc) + ", loss=" + str(loss))
        print('time : ', time.time() - global_start_time)
# Final accuracy after training.
train_acc = network.accuracy(x_train, y_train)
test_acc = network.accuracy(x_test, y_test)
print("final train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
Example #2
import numpy as np
import sys

# Make the local SimpleNet and gradient modules importable.
sys.path.append('./src')
sys.path.append('./src/lib')

from simple_net import SimpleNet
from gradient import numerical_gradient
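SimpleNet is assumed here to follow the book's definition: a randomly initialized 2x3 weight matrix, a linear predict, and a softmax cross-entropy loss. A minimal sketch of what ./src/simple_net.py is assumed to contain:

class SimpleNet:
    def __init__(self):
        # Weights drawn from a standard normal distribution.
        self.W = np.random.randn(2, 3)

    def predict(self, x):
        # Linear scores for the three classes.
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        z = z - np.max(z)                      # shift for numerical stability
        y = np.exp(z) / np.sum(np.exp(z))      # softmax
        return -np.sum(t * np.log(y + 1e-7))   # cross-entropy vs. one-hot t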

net = SimpleNet()
print(net.W)
# >>> [[-0.44439281  0.30789016 -1.50579685]
#      [-0.93170709  0.08170439 -0.12740328]]
# (W is random, so every run prints different values; the sample outputs
#  below come from separate runs and will not be consistent with each other)

x = np.array([0.6, 0.9])   # input vector
p = net.predict(x)         # raw class scores
print(p)
# >>> [ 1.00824761 -1.47819523  0.03650346]

print(np.argmax(p))        # index of the highest-scoring class
# >>> 1

t = np.array([0, 0, 1])    # one-hot label: the correct class is index 2
print(net.loss(x, t))
# >>> 1.704819611629646

# numerical_gradient perturbs net.W in place and calls f after each
# perturbation, so f can ignore its argument and just re-evaluate the loss.
f = lambda w: net.loss(x, t)
dW = numerical_gradient(f, net.W)
print(dW)
# >>> [[ 0.09999078  0.39092591 -0.49091668]
#      [ 0.14998616  0.58638886 -0.73637502]]
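The imported numerical_gradient is assumed to be the book's central-difference routine, which nudges each element of W by +/-h and measures the change in the loss. A minimal sketch under that assumption:

def numerical_gradient(f, x):
    h = 1e-4
    grad = np.zeros_like(x)
    # Iterate over every element of x, perturbing it in place.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp = x[idx]
        x[idx] = tmp + h
        fxh1 = f(x)                          # f(x + h)
        x[idx] = tmp - h
        fxh2 = f(x)                          # f(x - h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp                         # restore the original value
        it.iternext()
    return grad

Note that the two rows of the printed dW differ by exactly a factor of x[1]/x[0] = 1.5, as expected for a linear layer with softmax cross-entropy, where the gradient is the outer product of x and (y - t). With dW in hand, one plain gradient-descent step would be net.W -= learning_rate * dW, the same per-parameter update applied in Example #1.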