Example #1
import time

import numpy as np
from sklearn.model_selection import train_test_split

from adaline import Adaline  # module name assumed; the Adaline class itself is not shown here

# `dataset` is assumed to be an (n, 3) array: two feature columns and one target column.
mse = np.zeros((20, 1))
rmse = np.zeros((20, 1))
mean_time = 0
#cost = []
for i in range(20):
    X_train, X_test, Y_train, Y_test = train_test_split(dataset[:, :2],
                                                        dataset[:, 2],
                                                        test_size=0.80)
    Y_train = Y_train.reshape((Y_train.shape[0], 1))
    Y_test = Y_test.reshape((Y_test.shape[0], 1))

    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    adaline = Adaline(eta=0.01, n_iter=200)
    adaline.fit(X_train, Y_train)
    Y_hat = adaline.predict(X_test)
    mean_time += (time.perf_counter() - start_time) / 20

    mse[i] = ((Y_test - Y_hat)**2).mean(axis=0)
    rmse[i] = mse[i]**(1. / 2)
    #cost.append(adaline.error)

print("Mean execution time", mean_time)
print("Standard Deviation (MSE)", np.std(mse, axis=0))
print("Standard Deviation (RMSE)", np.std(rmse, axis=0))
# Cost-curve plot, kept disabled like the `cost` list above:
'''
fig, ax = plt.subplots()
plt.plot(range(1, len(cost[0]) + 1), cost[0], "o-")
plt.title("Cost")
plt.xlabel("epoch")
plt.ylabel("cost")
'''
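Example #1 above (and Example #5 below) call an Adaline class with an eta/n_iter constructor and fit/predict methods that is not shown on this page. A minimal sketch of that interface is given here for reference; the batch-gradient-descent update, the attribute names, and the regression-style predict are assumptions, not the original implementation.

import numpy as np


class Adaline:
    """Minimal ADALINE sketch: linear activation trained with batch gradient descent."""

    def __init__(self, eta=0.01, n_iter=50):
        self.eta = eta        # learning rate
        self.n_iter = n_iter  # number of passes over the training data
        self.error = []       # sum-of-squares cost recorded after each epoch

    def fit(self, X, y):
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float).reshape(-1, 1)
        self.w = np.zeros((X.shape[1], 1))  # weight vector
        self.b = 0.0                        # bias term
        for _ in range(self.n_iter):
            output = X @ self.w + self.b                      # linear activation
            errors = y - output
            self.w += self.eta * (X.T @ errors) / X.shape[0]  # averaged gradient step
            self.b += self.eta * errors.mean()
            self.error.append(float((errors ** 2).sum() / 2.0))
        return self

    def predict(self, X):
        # Raw linear output, as Example #1 uses it for MSE/RMSE; a classifier
        # variant would instead return np.where(output >= 0, 1, -1).
        X = np.asarray(X, dtype=float)
        return X @ self.w + self.b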
Example #2
            c=y,
            cmap=cm_bright)
plt.scatter([], [], color='r', label='Versicolor')  # empty scatter, used only to build the legend
plt.scatter([], [], color='b', label='Setosa')      # empty scatter, used only to build the legend
plt.legend()
plt.title('Iris dataset visualization')
plt.savefig('train.png')

# Adaline with 4 inputs
adaline = Adaline(4)
# Training
adaline.train(x, y)

## Test 1
A = [0.4329, -1.3719, 0.7022, -0.8535]  # Versicolor (1)
predict = adaline.predict(A)
print('## Test 1')
print('Input: ', A)
print('Expected class: Versicolor (1)')
if predict == 1:
    print('Prediction: Versicolor (1)')
else:
    print('Prediction: Setosa (-1)')
#=> 1

## Test 2
B = [0.3024, 0.2286, 0.8630, 2.7909]  # Setosa (-1)
predict = adaline.predict(B)
print('## Test 2')
print('Input: ', B)
print('Expected class: Setosa (-1)')
if predict == 1:
    print('Prediction: Versicolor (1)')
else:
    print('Prediction: Setosa (-1)')
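Example #2 assumes a different, classification-style Adaline: the constructor takes the number of inputs, train fits the weights, and predict returns +1 or -1. A minimal sketch of that assumed interface follows; the eta/n_iter defaults and the per-sample LMS update are assumptions, not the original implementation.

import numpy as np


class Adaline:
    """Sketch of the classification-style interface assumed by Example #2."""

    def __init__(self, n_inputs, eta=0.01, n_iter=100):
        self.w = np.zeros(n_inputs)  # weight vector
        self.b = 0.0                 # bias term
        self.eta = eta
        self.n_iter = n_iter

    def train(self, x, y):
        x = np.asarray(x, dtype=float)
        y = np.asarray(y, dtype=float)
        for _ in range(self.n_iter):
            for xi, target in zip(x, y):  # online (per-sample) LMS updates
                error = target - (xi @ self.w + self.b)
                self.w += self.eta * error * xi
                self.b += self.eta * error
        return self

    def predict(self, a):
        # Threshold the linear activation at zero to get a bipolar class label.
        return 1 if np.dot(a, self.w) + self.b >= 0.0 else -1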
Example #3
x = 0
teste = 1
tam = len(E)
treino = True  # training flag; cleared once the error stops improving
while treino:
    print("Geração: ", x)
    x += 1
    eqm_ant = t5.EQM(tam, E, S)
    for i in range(tam):
        t5.treino(E[i], S[i])
        teste = 2
    eqm_atual = t5.EQM(tam, E, S)
    if (t5.testeErro(eqm_ant, eqm_atual)):
        print("Treinamento Acabou!!")
        treino = False

print(t5.predict([0.9694, 0.6909, 0.4334, 3.4965]))
print(t5.predict([0.5427, 1.3832, 0.6390, 4.0352]))
print(t5.predict([0.6081, -0.9196, 0.5925, 0.1016]))
print(t5.predict([-0.1618, 0.4694, 0.2030, 3.0117]))
print(t5.predict([0.1870, -0.2578, 0.6124, 1.7749]))
print(t5.predict([0.4891, -0.5279, 0.4378, 0.6439]))
print(t5.predict([0.3777, 2.0149, 0.7423, 3.3932]))
print(t5.predict([1.1498, -0.4067, 0.2469, 1.5866]))
print(t5.predict([0.9325, 1.0950, 1.0359, 3.3591]))
print(t5.predict([0.5060, 1.3317, 0.9222, 3.7174]))
print(t5.predict([0.0497, -2.0656, 0.6124, -0.6585]))
print(t5.predict([0.4004, 3.5369, 0.9766, 5.3532]))
print(t5.predict([-0.1874, 1.3343, 0.5374, 3.2189]))
print(t5.predict([0.5060, 1.3317, 0.9222, 3.7174]))
teste = 1
print(t5.predict([1.6375, -0.7911, 0.7537, 0.5515]))
Example #4
for learning_rate in [0.2, 0.02, 0.002, 0.0002, 0.00002, 0.000002]:
    print("Testing learning rate = %f" % learning_rate)
    data_indices = [idx for idx in range(data_instances.shape[0])]
    # 10-fold cross validation
    fold_size = data_instances.shape[0] // 10
    total_performance = 0.0
    for holdout_fold_idx in range(10):
        # training_indices = data_indices - holdout_fold indices
        training_indices = np.array(
            np.setdiff1d(
                data_indices,
                data_indices[fold_size * holdout_fold_idx:
                             fold_size * holdout_fold_idx + fold_size]))
        # test_indices = holdout_fold indices
        test_indices = np.array([
            i for i in range(fold_size * holdout_fold_idx,
                             fold_size * holdout_fold_idx + fold_size)
        ])

        model = Adaline(20.0, learning_rate)
        # Train the model
        model.train(data_instances[training_indices])
        # Test performance on the held-out fold
        predictions = model.predict(data_instances[test_indices, :-1])
        total_performance += \
            sum(predictions == data_instances[test_indices, -1]) / \
            float(test_indices.shape[0])
    print("Average overall classification rate: %f" % (total_performance / 10))
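The manual fold bookkeeping above can also be written with scikit-learn's KFold. The sketch below is only an illustration: cross_validate and make_model are hypothetical names, and it assumes the same train/predict interface and label-in-last-column layout as the snippet above.

import numpy as np
from sklearn.model_selection import KFold


def cross_validate(make_model, data_instances, n_splits=10):
    """Average classification rate over n_splits folds (sketch)."""
    total = 0.0
    for train_idx, test_idx in KFold(n_splits=n_splits, shuffle=True).split(data_instances):
        model = make_model()                                   # fresh model per fold
        model.train(data_instances[train_idx])                 # rows include the label column
        predictions = model.predict(data_instances[test_idx, :-1])
        total += np.mean(predictions == data_instances[test_idx, -1])
    return total / n_splits

# e.g. cross_validate(lambda: Adaline(20.0, 0.002), data_instances)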
Example #5
from adaline import Adaline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.read_csv('/home/erick/Repo/Machine Learning/Adaline/iris.csv',
                 header=None)

y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)

X = df.iloc[0:100, [0, 2]].values

adn = Adaline()
adn.fit(X, y)

setosa_example = [5.2, 1.8]
versicolor_example = [6.4, 4.6]

print(adn.predict(setosa_example))
print(adn.predict(versicolor_example))