import numpy as np
from mlxtend.classifier import MultiLayerPerceptron as MLP

# Two-feature subset of the Iris data used as the shared test fixture
X = np.asarray([[6.1, 1.4], [7.7, 2.3], [6.3, 2.4], [6.4, 1.8],
                [6.2, 1.8], [6.9, 2.1], [6.7, 2.4], [6.9, 2.3],
                [5.8, 1.9], [6.8, 2.3], [6.7, 2.5], [6.7, 2.3],
                [6.3, 1.9], [6.5, 2.1], [6.2, 2.3], [5.9, 1.8]])

# standardize features
X = (X - X.mean(axis=0)) / X.std(axis=0)

y = np.asarray([0, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])


def test_score_function():
    mlp = MLP(epochs=20,
              eta=0.05,
              hidden_layers=[25],
              minibatches=5,
              random_seed=1)
    mlp.fit(X, y)
    acc = mlp.score(X, y)
    assert acc == 1.0, acc


def test_decay_function():
    mlp = MLP(epochs=20,
              eta=0.05,
              decrease_const=0.01,
              hidden_layers=[25],
              minibatches=5,
              random_seed=1)

    mlp.fit(X, y)
    # the decayed learning rate should end up below the initial eta
    assert mlp._decr_eta < mlp.eta
    acc = mlp.score(X, y)
    assert round(acc, 2) == 0.98, acc
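# For context, test_decay_function checks that the effective learning
# rate has decayed below its starting value. A minimal sketch of an
# inverse-time decay schedule, assuming the common
# eta / (1 + epoch * decrease_const) form (mlxtend's exact internal
# schedule may differ):
def decayed_eta(eta, decrease_const, epoch):
    return eta / (1.0 + epoch * decrease_const)

# With eta=0.05 and decrease_const=0.01, after 20 epochs the effective
# rate is ~0.0417, i.e. below the initial eta, which is what the
# assertion above relies on.
print(decayed_eta(0.05, 0.01, 20))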

from mlxtend.plotting import plot_decision_regions
import matplotlib.pyplot as plt

# Batch gradient descent (minibatches=1)
nn = MLP(hidden_layers=[50],
         l2=0.00,
         l1=0.0,
         epochs=150,
         eta=0.05,
         momentum=0.1,
         decrease_const=0.0,
         minibatches=1,
         random_seed=1,
         print_progress=3)
nn = nn.fit(X, y)

fig = plot_decision_regions(X=X, y=y, clf=nn, legend=2)
plt.show()
print('Accuracy(epochs = 150): %.2f%%' % (100 * nn.score(X, y)))

nn.epochs = 250
nn = nn.fit(X, y)
fig = plot_decision_regions(X=X, y=y, clf=nn, legend=2)
plt.title('epochs = 250')
plt.show()
print('Accuracy(epochs = 250): %.2f%%' % (100 * nn.score(X, y)))

plt.plot(range(len(nn.cost_)), nn.cost_)
plt.title('Gradient Descent training (minibatches=1)')
plt.xlabel('Epochs')
plt.ylabel('Cost')
plt.show()

# Switch to stochastic gradient descent (one sample per minibatch)
nn.minibatches = len(y)
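# For context: minibatches=1 runs full-batch gradient descent, while
# minibatches=len(y) updates the weights once per sample (stochastic
# gradient descent). A rough sketch of the index split this implies
# (the exact batching inside mlxtend may differ):
for batch in np.array_split(np.arange(len(y)), 4):
    print(batch)  # four index minibatches over the 16 samples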
# Example #4

import matplotlib.pyplot as plt
from mlxtend.data import mnist_data

# Load the 5000-sample MNIST subset that ships with mlxtend
X, y = mnist_data()


def plot_digit(X, y, idx):
    # Assumed definition; the original function body was truncated.
    # Shows one 28x28 digit together with its true label.
    img = X[idx].reshape(28, 28)
    plt.imshow(img, cmap='Greys', interpolation='nearest')
    plt.title('true label: %d' % y[idx])
    plt.show()


plot_digit(X, y, 3500)

from mlxtend.preprocessing import standardize

# Assumed train/test split; the original snippet does not show how
# X_train/X_test were derived from X, y.
X_train, y_train = X[:4500], y[:4500]
X_test, y_test = X[4500:], y[4500:]

X_train_std, params = standardize(X_train,
                                  columns=range(X_train.shape[1]),
                                  return_params=True)
X_test_std = standardize(X_test, columns=range(X_test.shape[1]), params=params)
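# standardize returns the training-set statistics in params so the same
# scaling can be re-applied to new data, as done for X_test above. A
# quick self-contained sketch with a hypothetical toy array:
import numpy as np
X_toy = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
X_toy_std, toy_params = standardize(X_toy, columns=[0, 1], return_params=True)
# Re-apply the toy training statistics to a new row
X_toy_new = standardize(np.array([[4.0, 40.0]]), columns=[0, 1],
                        params=toy_params)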
nn1 = MLP(hidden_layers=[150],
          l2=0.00,
          l1=0.0,
          epochs=100,
          eta=0.005,
          momentum=0.0,
          decrease_const=0.0,
          minibatches=100,
          random_seed=1,
          print_progress=3)
nn1.fit(X_train_std, y_train)

plt.plot(range(len(nn1.cost_)), nn1.cost_)
plt.ylabel('Cost')
plt.xlabel('Epochs')
plt.show()

print('Train Accuracy: %.2f%%' % (100 * nn1.score(X_train_std, y_train)))
print('Test Accuracy: %.2f%%' % (100 * nn1.score(X_test_std, y_test)))
# Example #5

from mlxtend.classifier import MultiLayerPerceptron as MLP
from mlxtend.data import iris_data

# Assumed data setup (missing from the snippet): two Iris features,
# standardized so that the 2D decision regions below can be plotted.
X, y = iris_data()
X = X[:, [0, 3]]  # sepal length and petal width
X_std = (X - X.mean(axis=0)) / X.std(axis=0)

nn1 = MLP(hidden_layers=[50],
          l2=0.00,
          l1=0.0,
          epochs=150,
          eta=0.05,
          momentum=0.1,
          decrease_const=0.0,
          minibatches=1,
          random_seed=1,
          print_progress=3)

nn1 = nn1.fit(X_std, y)

from mlxtend.plotting import plot_decision_regions
import matplotlib.pyplot as plt

fig = plot_decision_regions(X=X_std, y=y, clf=nn1, legend=2)
plt.title('Multi-layer perceptron w. 1 hidden layer (sigmoid)')
plt.show()

plt.plot(range(len(nn1.cost_)), nn1.cost_)
plt.ylabel('Cost')
plt.xlabel('Epochs')
plt.show()

print('Accuracy: %.2f%%' % (100 * nn1.score(X_std, y)))
# Example #6
from mlxtend.data import iris_data
X, y = iris_data()

# standardize training data
X_std = (X - X.mean(axis=0)) / X.std(axis=0)

from mlxtend.classifier import MultiLayerPerceptron as MLP

nn1 = MLP(hidden_layers=[10],
          l2=0.00,
          l1=0.0,
          epochs=10000,
          eta=0.001,
          momentum=0.1,
          decrease_const=0.0,
          minibatches=1,
          random_seed=1,
          print_progress=3)

nn1 = nn1.fit(X_std, y)
print('\nAccuracy: %.2f%%' % (100 * nn1.score(X_std, y)))
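# score is plain classification accuracy; computed by hand it is simply
# the fraction of matching labels (assuming predict returns class
# labels, as in the examples above):
import numpy as np
acc = np.mean(nn1.predict(X_std) == y)
print('Manual accuracy check: %.2f%%' % (100 * acc))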
# Example #7

from mlxtend.data import iris_data
from mlxtend.classifier import MultiLayerPerceptron as MLP
from mlxtend.plotting import plot_decision_regions
import matplotlib.pyplot as plt

# Assumed data setup (missing from the snippet): two standardized Iris
# features, as in Example #5.
X, y = iris_data()
X = X[:, [0, 3]]  # sepal length and petal width
X_std = (X - X.mean(axis=0)) / X.std(axis=0)

# Gradient Descent

# The opening of this constructor was truncated; the leading arguments
# are assumed to mirror Example #5.
nn1 = MLP(hidden_layers=[50],
          l2=0.00,
          l1=0.0,
          epochs=150,
          eta=0.05,
          momentum=0.1,
          decrease_const=0.0,
          minibatches=1,
          random_seed=1,
          print_progress=3)

nn1 = nn1.fit(X_std, y)
fig = plot_decision_regions(X=X_std, y=y, clf=nn1, legend=2)
plt.title('Multi-layer perceptron w. 1 hidden layer (logistic sigmoid)')
plt.show()

plt.plot(range(len(nn1.cost_)), nn1.cost_)
plt.ylabel("Cost")
plt.xlabel("Epochs")
plt.show()

print('Accuracy: %.2f%%' % (100 * nn1.score(X_std, y)))


# Stochastic Gradient Descent

nn2 = MLP(hidden_layers=[50],
          l2=0.00,
          l1=0.0,
          epochs=5,
          eta=0.005,
          momentum=0.1,
          decrease_const=0.0,
          minibatches=len(y),
          random_seed=1,
          print_progress=3)
nn2.fit(X_std, y)
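
# To compare the two optimizers, both cost curves can be drawn on one
# figure; a minimal sketch using the cost_ attribute from above:
plt.plot(range(len(nn1.cost_)), nn1.cost_, label='gradient descent')
plt.plot(range(len(nn2.cost_)), nn2.cost_, label='stochastic gradient descent')
plt.ylabel('Cost')
plt.xlabel('Epochs')
plt.legend()
plt.show()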