Example #1
        # Scale raw pixel values from [0, 255] down to [-0.1, 0.1], then map the 784 inputs to 10 outputs
        NormalizationLayer(0, 255, -0.1, 0.1),
        LinearLayer(784, 10, weights='norm_random'),
        # Commented-out layers for a deeper variant with a hidden layer and squashing non-linearities
        # TanhLayer,
        # LinearLayer(50, 10, weights='norm_random'),
        # TanhLayer,
        # NormalizationLayer(0, 10, 0, 1),
        # SigmoidLayer()
    ])

# display = ShowTraining(epochs_num = epochs)

trainer = Trainer(show_training=False)  #, show_function = display.show)

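# Train with cross-entropy loss and momentum SGD; learn() presumably returns the training
# loss history (J_list), the output-gradient history (dJdy_list) and the test-set loss (J_test).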
J_list, dJdy_list, J_test = trainer.learn(
    model=model,
    train=train,
    test=test,
    # loss = NegativeLogLikelihoodLoss(),
    loss=CrossEntropyLoss(),
    # loss = SquaredLoss(),
    # optimizer = GradientDescent(learning_rate=0.3),
    optimizer=GradientDescentMomentum(learning_rate=0.35 / 10, momentum=0.5),
    epochs=epochs,
    batch_size=10)

test_results(model, train, test)

raw_input('Press ENTER to exit')  # Python 2 built-in; use input() under Python 3

model.save('model.net')
Example #2
data_train = np.random.rand(1000, 2) * 5
train = []
for x in data_train:
    out = Q_hat.forward(x)
    # Keep only the arg-max component of Q_hat's output by masking it with a one-hot vector
    train.append(out * utils.to_one_hot_vect(np.argmax(out), out.size))

data_test = np.random.rand(1000, 2) * 5
test = []
for x in data_test:
    out = Q_hat.forward(x)
    test.append(out * utils.to_one_hot_vect(np.argmax(out), out.size))

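# Fit Q to the masked targets produced by Q_hat above, using squared loss and momentum SGD in batches of 100.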
J_list, dJdy_list, J_test = trainer.learn(
    model=Q,
    train=zip(data_train, train),
    test=zip(data_test, test),
    # loss = NegativeLogLikelihoodLoss(),
    # loss = CrossEntropyLoss(),
    loss=SquaredLoss(),
    # optimizer = GradientDescent(learning_rate=0.3),
    optimizer=GradientDescentMomentum(learning_rate=0.35, momentum=0.5),
    epochs=epochs,
    batch_size=100)

raw_input('Press ENTER to exit')

Q.save('model.net')