def test_cost_function(train_data, test_data):
    x, y, y_onehot = train_data
    x_test, y_test = test_data
    network = NeuralNetwork(layers=[784, 25, 10])
    network.fit(x, y_onehot, alpha=0.1, iterations=40)
    prediction_test = network.predict(x_test)
    accuracy_test = accuracy(prediction_test, y_test)
    assert accuracy_test > 90
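
# The test above assumes an accuracy() helper that returns a percentage
# (the assertion compares against 90 rather than 0.9). That helper is not
# shown in this section; a minimal sketch, assuming predictions and labels
# are 1-D arrays of class indices (illustrative, not the project's own code):
import numpy as np

def accuracy(predictions, labels):
    """Fraction of correct predictions, expressed as a percentage."""
    return np.mean(np.asarray(predictions) == np.asarray(labels)) * 100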
model.add_layer(Layer(8, 3))

# Build the loss function and the optimizer
lr = 0.01
loss = Loss(loss='cross_entropy_with_logits')
optimizer = Optimizers(optimizer='sgd', learning_rate=lr)
model.compile(loss=loss, optimizer=optimizer)

# Train the model
num_epochs = 1600
batch_size = 64
train_loss = []
test_loss = []
for epoch in range(num_epochs):
    # One pass over the training set; keep the loss of the last batch
    for x, y in train_data.get_batch(batch_size):
        batch_loss = model.fit(x, y)
    train_loss.append(batch_loss)

    # Evaluate on the test set: average loss and overall accuracy
    t_loss, n, right_num = 0., 0, 0
    for x, y in test_data.get_batch(batch_size, shuffle=False):
        y_pred = model(x)
        right_num += np.sum(
            np.argmax(y_pred, axis=-1) == np.argmax(y, axis=-1))
        t_loss += model.loss(y_pred, y)
        n += 1
    accuracy = right_num / test_data.num_samples
    t_loss /= n
    test_loss.append(t_loss)
    print('>%d/%d: train loss=%f, test_loss=%f, accuracy=%f' %
          (epoch + 1, num_epochs, batch_loss, t_loss, accuracy))

plt.plot(train_loss)
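
# The training loop above relies on train_data / test_data objects that expose
# get_batch(batch_size, shuffle=...) and num_samples. Those classes are not
# shown here; a minimal sketch of a wrapper with that interface, assuming the
# data is held in NumPy arrays (names and defaults are illustrative only):
import numpy as np

class ArrayDataset:
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.num_samples = len(x)

    def get_batch(self, batch_size, shuffle=True):
        """Yield (x, y) mini-batches, optionally over a shuffled index order."""
        indices = np.arange(self.num_samples)
        if shuffle:
            np.random.shuffle(indices)
        for start in range(0, self.num_samples, batch_size):
            batch = indices[start:start + batch_size]
            yield self.x[batch], self.y[batch]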
def test_network_fit():
    network = NeuralNetwork(layers=[2, 3, 1])
    x = np.array([[2, 3], [1, 1]])
    y = np.array([2, 1]).reshape(2, 1)
    cost_history = network.fit(x, y, alpha=0.01, iterations=1000)
    print(cost_history)