Code example #1
0
def test():
    """Smoke-test a NeuralNetwork: print its output on [0, 0] and its
    fitness, then repeat after each of ten mutations."""
    net = NeuralNetwork(shape)
    print("0", net.forward([0, 0]))
    print("fitness", fitness(net), "\n")
    for _ in range(10):
        # Mutate in place with step size 1 and observe the effect.
        net.mutate(1)
        print("0", net.forward([0, 0]))
        print("fitness", fitness(net), "\n")
Code example #2
0
def __test_custom_nn(train_test):
    """Train several fresh NeuralNetwork instances and report the best
    metrics observed on the held-out split.

    train_test: sequence of (train_X, train_Y, test_X, test_Y); it is
    reshaped in place to match the current network format.
    """
    attempts = 50

    # best_metrics: an array-vector where
    #  best_metrics[0] - best accuracy,
    #  best_metrics[1] - best precision,
    #  best_metrics[2] - best recall
    best_metrics = [0, 0, 0]

    # reshape target sets to match current network format
    __reshape(train_test)

    for _ in range(attempts):
        nn = NeuralNetwork()
        t = Trainer(nn)
        t.train(train_test[0], train_test[1])
        # Materialize the rounded predictions: the original `map(...)`
        # object is a one-shot iterator in Python 3, so it would be
        # exhausted (or fail to index) inside __get_metrics.
        prd = [round(el) for el in nn.forward(train_test[2])]
        best_metrics = __get_metrics(train_test[3], prd, best_metrics)

    __print_metrics('Custom neural network', best_metrics)
Code example #3
0
# Network hyper-parameters: topology, learning rate, L2 strength.
topo = [N_input, 6, N_output]
alpha = 1
lambdaa = 0.0001
print("nn topo : {}".format(topo))

## Momentum
learning_rate = MomentumLearningRate(learning_rate=alpha, beta=0.9)

# Build and initialize the L2-regularized network.
network = NeuralNetwork(
    topo=topo,
    alpha=alpha,
    learning_rate=learning_rate,
    lambdaa=lambdaa,
    regularization=L2Regularization,
).initialize()

for epoch in range(300):
    # One full-batch forward/backward pass per epoch.
    network.forward(train_X)
    network.backward(train_Y)
    if epoch % 10 != 0:
        continue
    # Progress report every 10 epochs.
    train_loss = np.mean(network.loss(train_X, train_Y))
    test_loss = np.mean(network.loss(test_X, test_Y))
    test_acc = network.accuracy(test_X, test_Y)
    print("Epoch:{} Training Loss:{} Test Loss:{} Test Acc:{}".format(
        epoch, train_loss, test_loss, test_acc))

## final
train_loss = np.mean(network.loss(train_X, train_Y))
test_loss = np.mean(network.loss(test_X, test_Y))
test_acc = network.accuracy(test_X, test_Y)
print("Epoch:{} Training Loss:{} Test Loss:{} Test Acc:{}".format(
    "Final", train_loss, test_loss, test_acc))

# pre = (network.predict(test_X)>0.5).astype(float)
Code example #4
0
File: main.py  Project: nicedi/ML_course_projects
for i in range(n_iter):
    # Reshuffle the sample indices at the start of every epoch.
    idxs = np.random.permutation(trainX.shape[0])

    for j in range(0, trainX.shape[0], batchsize):
        batch_idx = idxs[j:j + batchsize]
        batchX = trainX[batch_idx]

        # Task 3: implement the make_onehot function in utils.
        batchy = utils.make_onehot(trainy[batch_idx], 10)

        # Feed-forward and back-propagation are the two directions data
        # flows through a neural network; naming the methods `forward`
        # and `backward` follows the convention of the major ML
        # frameworks.
        y_hat = model.forward(batchX)

        # Task 4: read and understand the cross_entropy implementation
        # in utils.
        loss1 = utils.cross_entropy(y_hat, batchy)
        trainloss.append(loss1)
        error = y_hat - batchy
        model.backward(error)

        # Evaluate on the validation split.
        loss2 = utils.cross_entropy(model.forward(validX),
                                    utils.make_onehot(validy, 10))
        validloss.append(loss2)

        # Snapshot the current model parameters.
        params = []
Code example #5
0
File: std_bp.py  Project: raven1989/MachineLearning
# Show the data splits in use.
print("Train Y : {}".format(train_Y))
print("Test X : {}".format(test_X))
print("Test Y : {}".format(test_Y))

# Topology: flattened input size, one hidden layer of 5 units, 1 output.
topo = [np.reshape(X[0], -1).shape[0], 5, 1]
alpha = 1
lambdaa = 0.00001
print("nn topo : {}".format(topo))
network = NeuralNetwork(
    topo=topo,
    alpha=alpha,
    lambdaa=lambdaa,
    regularization=L2Regularization,
).initialize()

for epoch in range(1000):
    # Plain per-sample SGD; each vector is reshaped to a 1-row matrix.
    for x, y in zip(train_X, train_Y):
        network.forward(np.reshape(x, newshape=(1, x.shape[0])))
        network.backward(np.reshape(y, newshape=(1, y.shape[0])))
    if epoch % 100 != 0:
        continue
    # Progress report every 100 epochs.
    train_loss = np.mean(network.loss(train_X, train_Y))
    test_loss = np.mean(network.loss(test_X, test_Y))
    test_acc = network.accuracy(test_X, test_Y)
    print("Epoch:{} Training Loss:{} Test Loss:{} Test Acc:{}".format(
        epoch, train_loss, test_loss, test_acc))

# Raw (unthresholded) predictions on the test split.
pre = network.predict(test_X)
print("Predict : {}".format(pre))
Code example #6
0
File: main.py  Project: guiruiz/neural-network-python
# Compare the analytic gradient with a numerical estimate; the relative
# difference norm should be tiny if backprop is implemented correctly.
numgrad = util.computeNumericalGradient(NN, trainX, trainY)
grad = NN.computeGradients(trainX, trainY)
norm = np.linalg.norm(grad - numgrad) / np.linalg.norm(grad + numgrad)
print(norm)  # Should be less than 1e-8:

T = Trainer(NN)
T.train(trainX, trainY, testX, testY)
# util.showCostPlot(T.J, T.testJ)

# Generate test data and normalize each axis to [0, 1].
hoursSleep = np.linspace(0, 10, 100)
hoursStudy = np.linspace(0, 5, 100)
hoursSleepNorm = hoursSleep / 10.
hoursStudyNorm = hoursStudy / 5.

# Mesh every (sleep, study) combination, then flatten the grid into a
# single (n, 2) input matrix for the network.
a, b = np.meshgrid(hoursSleepNorm, hoursStudyNorm)
allInputs = np.column_stack((a.ravel(), b.ravel()))

allOutputs = NN.forward(allInputs)

# Axis grids (un-normalized hours) for the contour / 3-D plots.
yy = np.dot(hoursStudy.reshape(100, 1), np.ones((1, 100)))
xx = np.dot(hoursSleep.reshape(100, 1), np.ones((1, 100))).T
util.showContourPlot(xx, yy, allOutputs)
util.show3DPlot(xx, yy, allOutputs)
util.showProjectionsPlot(trainX_orig, trainY_orig)
Code example #7
0
File: main.py  Project: nicedi/ML_course_projects
for i in range(n_iter):
    # Reshuffle the sample indices at the start of every epoch.
    idxs = np.random.permutation(trainX.shape[0])

    for j in range(0, trainX.shape[0], batchsize):
        batchX = trainX[idxs[j:j + batchsize]]

        # Denoising auto-encoder: injecting noise into the inputs can
        # improve the model, but is not covered in this lesson.
        #noise_level = 0.2
        #noise = np.random.normal(0, noise_level, batchX.shape)
        #batchX = batchX + noise

        # An auto-encoder reconstructs its own input, so the (clean)
        # batch itself is the target.
        batchy = batchX.copy()

        y_hat = model.forward(batchX)

        # MSE reconstruction loss on the training batch.
        rec_error = y_hat - batchy
        trainloss.append((rec_error**2).mean())

        # Back-propagate the reconstruction error.
        model.backward(rec_error)

        # Track the test reconstruction error and snapshot the first and
        # last layer weights for later visualization.
        test_error = model.forward(testX) - testX
        testloss.append((test_error**2).mean())
        snapshot.append((model.layers[0].W.copy(), model.layers[-1].W.copy()))

utils.plot_loss(trainloss, testloss)

#%% Reconstruct the data and inspect the reconstruction quality.