Example No. 1
def compare_train(train_scaled, y_predicted):
    predictions = list()
    for i in range(len(train_scaled)):
        X = train_scaled[i, 0:-1]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        predictions.append(yhat)

    # Start at index 1 because the first observation is lost when differencing.
    rmse = sqrt(mean_squared_error(raw_values[1:split + 1], predictions))
    return rmse
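
data_misc is a project-specific helper module, so invert_scale itself is not part of this listing. In pipelines like the one above it typically re-attaches the prediction to its input row and applies the scaler's inverse transform; the following is a minimal sketch under that assumption, with a scikit-learn scaler fitted on rows of [lag features..., target]:

import numpy as np

def invert_scale(scaler, X, yhat):
    # Rebuild a full row [lag features..., prediction] so its shape matches
    # what the scaler was fitted on, invert the scaling, and return the last
    # column (the prediction on the original scale). Sketch only, not the
    # project's actual implementation.
    row = np.append(X, yhat).reshape(1, -1)
    inverted = scaler.inverse_transform(row)
    return inverted[0, -1]
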
Example No. 2
def compare_train(y_test, y_predicted):
    predictions = list()
    for i in range(len(y_predicted)):
        X = x_train[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        predictions.append(yhat)

    d = raw_values[window_size:split + window_size]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
Example No. 3
def compare_train(len_y_train=0, y_predicted=[]):
    predictions = list()
    d = avg_values[total_window_size:split + total_window_size + 1]
    for i in range(len_y_train):
        X = x_train[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        yhat = data_misc.inverse_difference(d, yhat, len_y_train + 1 - i)
        predictions.append(yhat)

    d = avg_values[total_window_size + 1:split + total_window_size + 1]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
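
inverse_difference undoes the differencing that made the series stationary. A minimal sketch of what it likely does, assuming the convention used in these examples (history is the undifferenced series and interval counts back from its end):

def inverse_difference(history, yhat, interval=1):
    # Undo first-order differencing: add back the observation that was
    # subtracted when the series was differenced. Sketch, not the project's
    # actual data_misc implementation.
    return yhat + history[-interval]
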
Example No. 4
def compare_test(test_scaled, y_predicted):
    predictions = list()
    for i in range(len(test_scaled)):
        X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        predictions.append(yhat)

    # Offset by one because the first observation is lost when differencing.
    d = raw_values[split + 1:]
    rmse = sqrt(mean_squared_error(d, predictions))

    return rmse, predictions
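
For context, a call to compare_test could look like the following; the model variable and the way its predictions are produced are assumptions, not part of the listing:

# Hypothetical usage: predict on the scaled test inputs, then evaluate on the
# original scale.
y_predicted = model.predict(test_scaled[:, 0:-1])
rmse, predictions = compare_test(test_scaled, y_predicted)
print('Test RMSE: %.7f' % rmse)
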
Example No. 5
def compare_train(window_size, scaler, len_y_train, x_train, y_predicted=[]):
    predictions = list()
    d = dc.avg_values[window_size:dc.split_train_test + window_size + 1]
    for i in range(len_y_train):
        X = x_train[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        yhat = data_misc.inverse_difference(d, yhat, len_y_train + 1 - i)
        predictions.append(yhat)

    # the +1 accounts for the observation dropped when the data was differenced
    d = dc.avg_values[window_size + 1:dc.split_train_test + window_size + 1]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
Example No. 6
def compare_train(train_scaled, y_predicted):
    predictions = list()
    for i in range(len(train_scaled)):
        X, y = train_scaled[i, 0:-1], train_scaled[i, -1]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        # split + 1 is used so the slice stays consistent with the data used for the differencing.
        yhat = data_misc.inverse_difference(raw_values[:split + 1], yhat, len(train_scaled) + 1 - i)
        # print(yhat)
        predictions.append(yhat)
    d = raw_values[1:split + 1]
    # Start at index 1 because the first observation is lost when differencing.
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse
Example No. 7
def compare_test(test_scaled, y_predicted):
    predictions = list()
    for i in range(len(test_scaled)):
        X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
        yhat = y_predicted[i]
        # predictions.append(yhat)
        yhat = data_misc.invert_scale(scaler, X, yhat)
        yhat = data_misc.inverse_difference(y_raw_supervised[split:], yhat, len(test_scaled) + 1 - i)
        # print(yhat)
        predictions.append(yhat)

    # Offset by one because the first observation is lost when differencing.
    d = raw_values[1 + split:]
    # d = test_scaled[:, -1]
    rmse = sqrt(mean_squared_error(d, predictions))

    return rmse, predictions
Example No. 8
def compare_test(len_y_test=0, y_predicted=[]):
    predictions = list()
    for i in range(len_y_test):
        X = x_test[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)

        # Stationary
        d = avg_values[split + total_window_size - 1:]
        yhat = data_misc.inverse_difference(d, yhat, len_y_test + 1 - i)

        predictions.append(yhat)

    d = avg_values[split + total_window_size + 1:]
    # d = avg_values[split + window_size :]
    rmse = sqrt(mean_squared_error(d, predictions))
    # rmse = sqrt(mean_squared_error(y_test, y_predicted))
    return rmse, predictions
Example No. 9
def compare_test(window_size, scaler, len_y_test, x_test, y_predicted=[]):
    predictions = list()
    for i in range(len_y_test):
        X = x_test[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)

        # Stationary
        d = dc.avg_values[dc.split_train_test + window_size - 1:]
        yhat = data_misc.inverse_difference(d, yhat, len_y_test + 1 - i)

        predictions.append(yhat)

    # the +1 accounts for the observation dropped when the data was differenced
    d = dc.avg_values[dc.split_train_test + window_size + 1:]
    # d = avg_values[split + window_size :]
    rmse = sqrt(mean_squared_error(d, predictions))
    # rmse = sqrt(mean_squared_error(y_test, y_predicted))
    return rmse, predictions
Example No. 10
def compare_val(window_size, scaler, len_y_val, x_val, y_predicted=[]):
    predictions = list()
    for i in range(len_y_val):
        X = x_val[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)

        # Stationary
        #print(yhat)
        # Boundary between train and val; the val slice d is indexed from its end backwards.
        d = dc.avg_values[dc.split_train_val + window_size:dc.split_train_val + window_size + 1 + len_y_val]
        yhat = data_misc.inverse_difference(d, yhat, len_y_val + 1 - i)

        predictions.append(yhat)

    #print('Predictions')
    #print(predictions)

    # the +1 accounts for the observation dropped when the data was differenced
    d = dc.avg_values[dc.split_train_val + window_size + 1: dc.split_train_val + window_size + 1 + dc.split_val_test]
    # d = avg_values[split + window_size :]
    rmse = sqrt(mean_squared_error(d, predictions))
    # rmse = sqrt(mean_squared_error(y_test, y_predicted))
    return rmse, predictions
Example No. 11
print(regr.intercept_)

y_predicted = regr.predict(X_test)

print('y_test: ')
print(y_test)
print('y_predicted: ')
print(y_predicted)

predictions = list()
for i in range(len(test_scaled)):
    X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    yhat = y_predicted[i]
    print("Y_test: " + str(y) + " Yhat: " + str(yhat))

    yhat = data_misc.invert_scale(scaler, X, yhat)
    #print("yhat no scaled:" + str(yhat))
    yhat = data_misc.inverse_difference(raw_values, yhat, len(test_scaled) - i)
    #print("yhat no difference:" + str(yhat))
    # store forecast
    predictions.append(yhat)


rmse = sqrt(mean_squared_error(raw_values[-10:], predictions))
print('Test RMSE: %.7f' % (rmse))
misc.plot_line_graph('Test vs Predicted Data', raw_values[-10:], predictions)
misc.print_comparison_list('Raw', raw_values[-10:], predictions)

misc.plot_data_graph('All Data', raw_values)

misc.plot_data_graph('Training Data', raw_values[:-10])
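
The snippet above starts from an already fitted regressor regr; its training is not shown. A minimal sketch of the setup it presupposes, assuming a scikit-learn linear model and the scaled lag features used elsewhere in these examples:

from sklearn.linear_model import LinearRegression

# Assumed setup (not part of the original snippet): X_train/y_train are the
# scaled lag features and targets for the training portion, X_test the
# features for the held-out observations.
regr = LinearRegression()
regr.fit(X_train, y_train)
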
Example No. 12
                          nb_epoch=nb_epoch,
                          neurons=neurons)
    # forecast the entire training dataset to build up state for forecasting
    # train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
    # lstm_model.predict(train_reshaped, batch_size=1)
    # walk-forward validation on the test data
    predictions = list()
    normal_y = list()
    for i in range(len(test_scaled)):
        # make one-step forecast
        x_train, y_train = test_scaled[i, 0:-1], test_scaled[i, -1]

        # Forecast for test
        y_predicted = forecast_lstm(lstm_model, 1, x_train)
        # invert scaling
        y_predicted = data_misc.invert_scale(scaler, x_train, y_predicted)
        y = data_misc.invert_scale(scaler, x_train, y_train)

        # invert differencing test
        # Slice the history so it lines up with the supervised dataset, which was trimmed earlier because it was padded with zeroes.
        d = raw_values[split + window_size - 1:]
        y_predicted = data_misc.inverse_difference(d, y_predicted,
                                                   len(test_scaled) + 1 - i)
        y = data_misc.inverse_difference(d, y, len(test_scaled) + 1 - i)

        # print(" Y_test: " + str(y) + " Yhat: " + str(yhat) + " yraw:" + str(raw_values[i + len(train) + 1]))
        # store forecast
        normal_y.append(y)
        predictions.append(y_predicted)

    # report performance
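
forecast_lstm is another project helper that is not shown in this excerpt. In walk-forward loops like the one above it usually reshapes a single row of lag features to the LSTM's expected (samples, timesteps, features) shape and returns the scalar forecast; a minimal sketch under that assumption:

def forecast_lstm(model, batch_size, X):
    # Reshape one row of lag features to (1 sample, 1 timestep, len(X)
    # features), run a single-step Keras prediction and return it as a scalar.
    # Sketch only; the project's actual helper may differ.
    X = X.reshape(1, 1, len(X))
    yhat = model.predict(X, batch_size=batch_size)
    return yhat[0, 0]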