Example #1
    x_batch = x_train_std[batch_mask]
    t_batch = y_train[batch_mask]

    # Compute the gradient
    grad = network.gradient(x_batch, t_batch)

    # Update the parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    if i % iter_per_epoch == 0:
        # NOTE: accuracy here is measured on the raw x_train, not the standardized
        # x_train_std used for the mini-batches
        train_acc = network.accuracy(x_train, y_train)
        # test accuracy is skipped in this example
        # test_acc = network.accuracy(x_test, y_test)
        test_acc = 0
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("i=" + str(i) + ", train acc, test acc | " + str(train_acc) +
              ", " + str(test_acc) + " , loss=" + str(loss))
        print('time : ', time.time() - global_start_time)
# final evaluation (recomputation disabled here; the values from the last epoch check are reused)
# train_acc = network.accuracy(x_train, y_train)
# test_acc  = network.accuracy(x_test, y_test)
#
print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc) +
      " , loss=" + str(loss))
print('time : ', time.time() - global_start_time)
#
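Example #1 and Example #2 below both start in the middle of the training loop: the surrounding for loop, batch_mask, and (in Example #1) x_train_std are defined earlier in the original script and are not part of the excerpt. The following is a minimal sketch of that missing context, assuming the same random mini-batch sampling as Example #3; the values of iters_num, train_size, batch_size, and learning_rate, and the simple standardization of x_train, are assumptions, not the author's original code.

import numpy as np

# sketch only: standardize the pixel values to zero mean / unit variance
x_train_std = (x_train - x_train.mean()) / x_train.std()

iters_num = 10000                  # assumed values, following Example #3
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)  # indices of this iteration's mini-batch
    # ... the excerpt above (gradient, parameter update, loss, epoch check) runs here ...

Example #2 below follows the same loop structure but indexes the raw x_train instead of x_train_std.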
Example #2
    x_batch = x_train[batch_mask]
    t_batch = y_train[batch_mask]

    # Compute the gradient
    grad = network.gradient(x_batch, t_batch)

    # Update the parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, y_train)
        test_acc = network.accuracy(x_test, y_test)
        # test_acc = 0
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("i=" + str(i) + ", train acc, test acc | " + str(train_acc) +
              ", " + str(test_acc) + " , loss=" + str(loss))
        print('time : ', time.time() - global_start_time)
# final evaluation on the full training and test sets
train_acc = network.accuracy(x_train, y_train)
test_acc = network.accuracy(x_test, y_test)
#
print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc) +
      " , loss=" + str(loss))
print('time : ', time.time() - global_start_time)
#
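Both excerpts above check accuracy only every iter_per_epoch iterations, but that variable is defined outside the excerpts. One common way to define it, shown as an assumption below, is to treat one epoch as enough mini-batches to pass over the training set once.

# assumed definition of the epoch counter used in Examples #1 and #2:
# one epoch = enough mini-batches to cover the training set once (at least 1)
iter_per_epoch = max(train_size // batch_size, 1)

With train_size = 60000 and batch_size = 100 this gives 600, so the accuracy check runs once every 600 iterations.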
Example #3
(x_train, t_train), (x_test, t_test) = mnist.load_data()
t_train_one_hot = one_hot(t_train)
t_test_one_hot = one_hot(t_test)
x_train_reshape = x_train.reshape(60000, 784)
x_test_reshape = x_test.reshape(10000, 784)  # flatten each image to 784 values, the format we want

train_loss_list = []
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
rate = 0.1

network = SimpleNet(input_size=784, hidden_size=50, output_size=10)

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train_reshape[batch_mask]  # square brackets here are fancy indexing, not a function call
    t_batch = t_train_one_hot[batch_mask]  # randomly pick a small handful of samples
    grads = network.numerical_gradient(x_batch, t_batch)
    for key in ('w1', 'b1', 'w2', 'b2'):
        network.param[key] -= rate * grads[key]  # update the parameters; the gradients turn out to be very small
    loss = network.loss_function(x_batch, t_batch)
    print(loss)
    train_loss_list.append(loss)
    print(network.accuracy(x_batch, t_batch))  # stays at chance level and never actually improves

y = np.array(train_loss_list)
x = np.array(range(len(train_loss_list)))
plot.plot(x, y)
plot.show()
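Example #3 calls one_hot(...) and relies on numpy, a matplotlib.pyplot alias named plot, and an MNIST loader, none of which appear in the excerpt. The sketch below shows assumed imports and a guess at what the one_hot helper does (mapping integer labels 0-9 to length-10 one-hot rows); the Keras-style MNIST loader matches the load_data() call above but is an assumption.

import numpy as np
import matplotlib.pyplot as plot             # the excerpt uses the alias "plot"
from tensorflow.keras.datasets import mnist  # assumption: Keras-style ((train), (test)) = load_data()

def one_hot(labels, num_classes=10):
    # assumed helper: turn integer labels of shape (N,) into one-hot rows of shape (N, num_classes)
    encoded = np.zeros((labels.size, num_classes))
    encoded[np.arange(labels.size), labels] = 1
    return encoded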