def test_score_function_ftrl():
    mlp = MLP(epochs=100,
              eta=0.5,
              hidden_layers=[5],
              optimizer="ftrl",
              activations=["logistic"],
              minibatches=1,
              random_seed=1)
    mlp.fit(X, y)
    acc = mlp.score(X, y)
    assert acc == 1.0, acc
def test_score_function_adagrad():
    mlp = MLP(epochs=100,
              eta=0.5,
              hidden_layers=[5],
              optimizer='adagrad',
              activations=['logistic'],
              minibatches=1,
              random_seed=1)
    mlp.fit(X, y)
    acc = mlp.score(X, y)
    assert acc == 1.0, acc
def test_multiclass_gd_dropout():
    mlp = MLP(epochs=100,
              eta=0.5,
              hidden_layers=[5],
              optimizer='gradientdescent',
              activations=['logistic'],
              minibatches=1,
              random_seed=1,
              dropout=0.05)
    mlp.fit(X, y)
    acc = round(mlp.score(X, y), 2)
    assert acc == 0.67, acc
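# NOTE (sketch): the three tests above rely on module-level fixtures
# `MLP`, `X`, and `y` defined elsewhere in the test module.  The lines
# below show one plausible setup, assuming `MLP` aliases mlxtend's
# TensorFlow multilayer perceptron (whose optimizer options include
# 'ftrl' and 'adagrad') and that the data is a two-feature Iris subset;
# the original fixture may differ.
from mlxtend.tf_classifier import TfMultiLayerPerceptron as MLP
from mlxtend.data import iris_data

X, y = iris_data()
X = X[:, [0, 3]]   # sepal length and petal width only, to keep the toy problem small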
                             print_progress=3,
                             minibatches=1,
                             random_seed=1)
nn1 = nn1.fit(X_std, y)

fig = plot_decision_regions(X=X_std, y=y, clf=nn1, legend=2)
plt.title('Multi-layer perceptron w. 1 hidden layer (logistic sigmoid)')
plt.show()

plt.plot(range(len(nn1.cost_)), nn1.cost_)
plt.ylabel("Cost")
plt.xlabel("Epochs")
plt.show()

print('Accuracy: %.2f%%' % (100 * nn1.score(X_std, y)))

# Stochastic Gradient Descent
nn2 = TfMultiLayerPerceptron(eta=0.5,
                             epochs=20,
                             hidden_layers=[10],
                             activations=['logistic'],
                             optimizer='gradientdescent',
                             print_progress=3,
                             minibatches=len(y),
                             random_seed=1)
nn2.fit(X_std, y)

fig = plot_decision_regions(X=X_std, y=y, clf=nn2, legend=2)
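# NOTE (sketch): the decision-region example above additionally assumes a
# standardized feature matrix `X_std` and the plotting/TensorFlow imports
# shown below.  Building on the Iris fixture sketched after the tests, one
# plausible setup is (plot_decision_regions lives in mlxtend.plotting in
# recent releases and in mlxtend.evaluate in older ones):
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions
from mlxtend.tf_classifier import TfMultiLayerPerceptron

X_std = (X - X.mean(axis=0)) / X.std(axis=0)   # standardize both features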