Exemplo n.º 1
0
def run_neural_net(connection, data):
    """Train a conjugate-gradient network on *data* and report timing and error.

    Parameters
    ----------
    connection : network architecture/connection object accepted by
        ``algorithms.ConjugateGradient`` (neupy).
    data : sklearn-style dataset bunch exposing ``.data`` (features) and
        ``.target`` (regression targets).

    Returns
    -------
    list
        ``[training_seconds, mae_error]`` — wall-clock training time and the
        mean absolute error measured on inverse-transformed (original-scale)
        test targets.
    """
    features, target = data.data, data.target

    # Separate scalers so the target can be inverse-transformed later
    # to report the error in the original units.
    data_scalar = preprocessing.MinMaxScaler()
    target_scalar = preprocessing.MinMaxScaler()

    features = data_scalar.fit_transform(features)
    target = target_scalar.fit_transform(target.reshape(-1, 1))

    # Fix random seeds so repeated runs are comparable.
    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(
        features, target, train_size=0.85)

    cgnet = algorithms.ConjugateGradient(
        connection,
        search_method='golden',
        show_epoch=5,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    time_start = time.time()
    cgnet.train(x_train, y_train, x_test, y_test, epochs=50)
    time_end = time.time()

    # Predictions are rounded to one decimal before scoring, matching the
    # original evaluation protocol.
    y_predict = cgnet.predict(x_test).round(1)
    error = mae(target_scalar.inverse_transform(y_test),
                target_scalar.inverse_transform(y_predict))

    return [time_end - time_start, error]
Exemplo n.º 2
0
# Manual sweep over the GRNN smoothing parameter (std), kept for reference;
# the best value found is hard-coded below.
# for std_in in np.array([0.01, 0.1, 0.5, 0.8, 1.0, 1.2, 1.4, 2.0, 5.0, 10.0]):
#     grnnet = algorithms.GRNN(std=std_in, verbose=True)
#     grnnet.train(training_X, training_Y)
#     predicted = grnnet.predict(testing_X)
#     error = scorer(testing_Y, predicted)
#     print("GRNN RMSE = {:.5f}\n".format(error))
#     if error < best_error:
#         print("New Best Error Found: " + str(error))
#         best_error = error
#         best_model = grnnet

# Train a Generalized Regression Neural Network with the chosen smoothing
# factor and evaluate it on the held-out test split.
grnnet2 = algorithms.GRNN(std=0.2, verbose=True)
grnnet2.train(training_X, training_Y)
y_predicted = grnnet2.predict(testing_X)
# RMSE/MAE below are computed in the scaled target space.
print("RMSE = " + str(estimators.rmse(y_predicted, testing_Y.ravel())))
print("MAE = " + str(estimators.mae(y_predicted, testing_Y.ravel())))
# NOTE(review): unlike the lines above, testing_Y is NOT raveled here, and a
# scalar MAE is handed to inverse_transform — confirm both are intentional.
actual_mae = y_data_scaler.inverse_transform(
    estimators.mae(y_predicted, testing_Y))
print("MAE (no. of shares) = " + str(actual_mae.squeeze()))

# Persist the trained GRNN model for later comparison.
import _pickle
with open(
        '/home/pier/Machine_Learning/KE5206NN/regression/regression_models/grnn.pkl',
        'wb') as fid:
    _pickle.dump(grnnet2, fid)

# GRNN Best values
#
# RMSE = 0.019625235702837703
# MAE = 0.004645349837893423
Exemplo n.º 3
0
    def test_mae(self):
        """MAE of [1, 2, 3] vs [3, 2, 1] is (2 + 0 + 2) / 3 = 4/3."""
        expected = np.array([3, 2, 1])
        produced = np.array([1, 2, 3])

        result = estimators.mae(expected, produced)
        self.assertAlmostEqual(result, 4.0 / 3.0)
Exemplo n.º 4
0
    def test_mae(self):
        """Mean absolute error of two symmetric 3-element vectors equals 4/3."""
        vec_target = np.array([3, 2, 1])
        vec_prediction = np.array([1, 2, 3])
        self.assertAlmostEqual(
            estimators.mae(vec_target, vec_prediction), 4 / 3.0)
Exemplo n.º 5
0
from neupy import estimators
import _pickle

# Load previously trained models from disk for side-by-side evaluation.
# NOTE(review): unpickling executes arbitrary code — only load trusted files.
with open(
        '/Users/pierlim/PycharmProjects/KE5206NN/regression/regression_models/multi_layer_perceptron.pkl',
        'rb') as fid:
    mlp = _pickle.load(fid)

with open(
        '/Users/pierlim/PycharmProjects/KE5206NN/regression/regression_models/grnn.pkl',
        'rb') as fid:
    grnn = _pickle.load(fid)

# Evaluate the MLP on the test split (errors are in the scaled target space).
y_mlp_predicted = mlp.predict(testing_X)
print("MLP RMSE = " + str(estimators.rmse(y_mlp_predicted, testing_Y.ravel())))
mlp_mae = estimators.mae(y_mlp_predicted, testing_Y.ravel())
print("MLP MAE = " + str(estimators.mae(y_mlp_predicted, testing_Y.ravel())))
# NOTE(review): testing_Y is not raveled here and a scalar MAE is passed to
# inverse_transform — confirm this matches the scaler's expectations.
actual_mae = y_data_scaler.inverse_transform(
    estimators.mae(y_mlp_predicted, testing_Y))
print("MLP MAE (no. of shares) = " + str(actual_mae.squeeze()))

# Evaluate the GRNN the same way; its predictions are flattened first.
y_grnn_predicted = grnn.predict(testing_X)
y_grnn_predicted = y_grnn_predicted.ravel().transpose()
grnn_mae = estimators.mae(y_grnn_predicted, testing_Y.ravel())
print("GRNN RMSE = " +
      str(estimators.rmse(y_grnn_predicted, testing_Y.ravel())))
print("GRNN MAE = " + str(estimators.mae(y_grnn_predicted, testing_Y.ravel())))
actual_mae = y_data_scaler.inverse_transform(
    estimators.mae(y_grnn_predicted, testing_Y))
print("GRNN MAE (no. of shares) = " + str(actual_mae.squeeze()))
print(y_grnn_predicted.shape)
Exemplo n.º 6
0
#
# training_X = x_data_scaler.transform(training_X)
# training_Y = y_data_scaler.transform(training_Y.reshape(-1, 1))
#
# testing_X = x_data_scaler.transform(testing_X)
# testing_Y = y_data_scaler.transform(testing_Y.reshape(-1, 1))
# Standardize each feature column to zero mean / unit variance.
scaler = preprocessing.StandardScaler()

# NOTE(review): fit_transform is re-fit on every column AND on the test set,
# so test data uses its own statistics instead of the training fit (data
# leakage) — confirm this is intended.
# NOTE(review): training_X[n] is label-based indexing if training_X is a
# DataFrame (see ' shares' below) while n is a positional index from range();
# verify the columns really carry integer labels 0..shape[1]-1.
for n in range(training_X.shape[1]):
    training_X[n] = scaler.fit_transform(training_X[n].reshape(-1, 1))
    testing_X[n] = scaler.fit_transform(testing_X[n].reshape(-1, 1))

# Targets are taken from the (already standardized) ' shares' column.
training_Y = scaler.fit_transform(training_X[' shares'].reshape(-1, 1))
testing_Y = scaler.fit_transform(testing_X[' shares'].reshape(-1, 1))

# MLPP Scikit-Learn - RMSE 0.01567453951930472
from sklearn.neural_network import MLPRegressor
from scipy import stats
# NOTE(review): sklearn.grid_search was removed in scikit-learn 0.20; modern
# versions provide RandomizedSearchCV in sklearn.model_selection.
from sklearn.grid_search import RandomizedSearchCV
from neupy import estimators

# Multi-layer perceptron with hyperparameters presumably found by the
# randomized search imported above — TODO confirm.
mlp = MLPRegressor(hidden_layer_sizes=(10, 10, 10), max_iter=2000, activation='tanh', solver='sgd',
                   learning_rate='constant', early_stopping=True, learning_rate_init=0.04, alpha=100, beta_1=0.616,
                   beta_2=0.194)
mlp.fit(training_X, training_Y)
y_predicted = mlp.predict(testing_X)
# Errors below are reported in the scaled target space.
print("RMSE = " + str(estimators.rmse(y_predicted, testing_Y.ravel())))
print("MAE = " + str(estimators.mae(y_predicted, testing_Y.ravel())))
# NOTE(review): scalar MAE passed to inverse_transform — confirm the scaler
# accepts it and that rescaling a scaled-space MAE this way is meaningful.
actual_mae = y_data_scaler.inverse_transform(estimators.mae(y_predicted, testing_Y))
print("MAE (no. of shares) = " + str(actual_mae.squeeze()))