Code example #1
0
alpha = 1          # learning rate handed to the network / momentum schedule
lambdaa = 0.0001   # L2 regularization strength
print("nn topo : {}".format(topo))

## Momentum
learning_rate = MomentumLearningRate(learning_rate=alpha, beta=0.9)

network = NeuralNetwork(topo=topo,
                        alpha=alpha,
                        learning_rate=learning_rate,
                        lambdaa=lambdaa,
                        regularization=L2Regularization).initialize()


def _report_metrics(tag):
    """Compute and print train/test loss and test accuracy.

    Returns the (train_loss, test_loss, test_acc) tuple so callers can
    keep the values around. Factored out because the same three-line
    evaluation was duplicated for the periodic and the final report.
    """
    train_loss = np.mean(network.loss(train_X, train_Y))
    test_loss = np.mean(network.loss(test_X, test_Y))
    test_acc = network.accuracy(test_X, test_Y)
    print("Epoch:{} Training Loss:{} Test Loss:{} Test Acc:{}".format(
        tag, train_loss, test_loss, test_acc))
    return train_loss, test_loss, test_acc


for epoch in range(300):
    # Full-batch training: one forward/backward pass over the whole set.
    network.forward(train_X)
    network.backward(train_Y)
    if epoch % 10 == 0:
        _report_metrics(epoch)
## final
train_loss, test_loss, test_acc = _report_metrics("Final")

# pre = (network.predict(test_X)>0.5).astype(float)
pre = network.predict(test_X)
Code example #2
0
File: std_bp.py  Project: raven1989/MachineLearning
# Dump the data splits for a quick sanity check before training.
print("Train Y : {}".format(train_Y))
print("Test X : {}".format(test_X))
print("Test Y : {}".format(test_Y))

# Topology: flattened-input size -> 5 hidden units -> 1 output unit.
topo = [np.reshape(X[0], -1).shape[0], 5, 1]
alpha = 1
lambdaa = 0.00001
print("nn topo : {}".format(topo))
network = NeuralNetwork(topo=topo,
                        alpha=alpha,
                        lambdaa=lambdaa,
                        regularization=L2Regularization).initialize()

for epoch in range(1000):
    # Per-sample (stochastic) training: each sample is reshaped into a
    # single-row (1, n_features) / (1, n_outputs) matrix before the pass.
    for sample_x, sample_y in zip(train_X, train_Y):
        network.forward(sample_x.reshape((1, sample_x.shape[0])))
        network.backward(sample_y.reshape((1, sample_y.shape[0])))
    if epoch % 100 == 0:
        # Report mean losses and test accuracy every 100 epochs.
        train_loss = np.mean(network.loss(train_X, train_Y))
        test_loss = np.mean(network.loss(test_X, test_Y))
        test_acc = network.accuracy(test_X, test_Y)
        print("Epoch:{} Training Loss:{} Test Loss:{} Test Acc:{}".format(
            epoch, train_loss, test_loss, test_acc))

# pre = (network.predict(test_X)>0.5).astype(float)
pre = network.predict(test_X)
print("Predict : {}".format(pre))
Code example #3
0
File: main.py  Project: nicedi/ML_course_projects
        # Task 3: implement the make_onehot function in utils.

        # One-hot encode the labels of the current mini-batch (10 classes).
        batchy = utils.make_onehot(trainy[idxs[j:j + batchsize]], 10)
        # Feed-forward and back-propagation are the two directions data
        # flows through an artificial neural network; the methods below are
        # named forward/backward to stay consistent with how other large
        # machine-learning frameworks name the corresponding operations.

        y_hat = model.forward(batchX)

        # Task 4: understand the implementation of cross_entropy in utils.

        loss1 = utils.cross_entropy(y_hat, batchy)
        trainloss.append(loss1)
        # Error signal fed to backprop: prediction minus one-hot target.
        error = y_hat - batchy
        model.backward(error)

        # Evaluate model performance on the validation set.
        loss2 = utils.cross_entropy(model.forward(validX),
                                    utils.make_onehot(validy, 10))
        validloss.append(loss2)

        # Save the current model: snapshot the weights of every Linear
        # layer so the best-performing model can be restored later.
        params = []
        for ly in model.layers:
            if isinstance(ly, Linear):
                params.append(ly.W.copy())
        snapshot.append(params)

    # NOTE(review): "{0}/{1}" prints j over i — presumably the batch index
    # and epoch counter from the enclosing (not visible) loops; confirm the
    # intended order against those loop headers.
    print("iteration:{0}/{1} trainloss:{2:.2f}, validloss:{3:.2f}".\
        format(j, i, loss1, loss2))
Code example #4
0
File: main.py  Project: nicedi/ML_course_projects
        # denoising auto-encoder
        # Noise could be injected into the training data to improve model
        # robustness; that topic is not covered in this lesson.
        #noise_level = 0.2
        #noise = np.random.normal(0, noise_level, batchX.shape)
        #batchX = batchX + noise

        batchy = batchX.copy()  # for an auto-encoder the input is its own target

        y_hat = model.forward(batchX)
        # MSE loss: mean squared reconstruction error over the batch.
        rec_error = y_hat - batchy
        trainloss.append((rec_error**2).mean())

        # backprop — the raw reconstruction error is the gradient signal.
        model.backward(rec_error)

        # evaluate reconstruction error on the held-out test set
        test_error = model.forward(testX) - testX
        testloss.append((test_error**2).mean())
        # Snapshot the first (encoder) and last (decoder) layer weights so
        # the best epoch can be restored after training.
        snapshot.append((model.layers[0].W.copy(), model.layers[-1].W.copy()))

utils.plot_loss(trainloss, testloss)

#%% Reconstruct the data and inspect the restoration quality
# Choose the snapshot with the lowest test loss and restore its weights.
# NOTE(review): restoring into layers[2] assumes layers[-1] is layers[2]
# (i.e. a three-layer model) — confirm against the model definition.
best_epoch = np.argmin(testloss)
enc_weights, dec_weights = snapshot[best_epoch]
model.layers[0].W = enc_weights
model.layers[2].W = dec_weights