コード例 #1
0
def compare(y_raw_test, y_predicted):
    """Invert differencing on each prediction and score against the raw test set.

    Args:
        y_raw_test: raw (un-differenced) test observations.
        y_predicted: differenced one-step predictions to invert.

    Returns:
        (rmse, predictions): RMSE versus ``y_raw_test`` and the inverted
        prediction list.
    """
    predictions = list()
    n = len(y_predicted)
    for i in range(n):
        yhat = y_predicted[i]
        # Offset n + 1 - i indexes the history from the end backwards; the +1
        # accounts for the observation dropped by differencing.
        # NOTE(review): the original used the global ``y_test`` for this
        # length; using the parameter-derived length is equivalent whenever
        # both have the same size — confirm against callers.
        yhat = data_misc.inverse_difference(raw_values, yhat, n + 1 - i)
        predictions.append(yhat)

    rmse = sqrt(mean_squared_error(y_raw_test, predictions))
    return rmse, predictions
コード例 #2
0
def compare_train(train_scaled, y_predicted):
    """Invert differencing on training predictions and return their RMSE.

    Args:
        train_scaled: scaled training rows (only its length is used here).
        y_predicted: differenced predictions, one per training row.

    Returns:
        RMSE against ``raw_values[1:split + 1]`` — the raw training targets,
        starting at index 1 because differencing drops the first observation.
    """
    predictions = list()
    # Hoisted once: raw history covering the training span (was recomputed
    # per iteration, and a duplicate unused local ``d`` was removed).
    history = raw_values[0:split + 1]
    n = len(train_scaled)
    for i in range(n):
        yhat = y_predicted[i]
        # +1 compensates for the observation lost when the series was diffed.
        yhat = data_misc.inverse_difference(history, yhat, n + 1 - i)
        predictions.append(yhat)

    rmse = sqrt(mean_squared_error(raw_values[1:split + 1], predictions))
    return rmse
コード例 #3
0
def compare_train(y_test, y_predicted):
    """Invert differencing on training predictions and return (rmse, predictions).

    NOTE(review): despite the parameter name ``y_test``, the body iterates over
    the module-level global ``y_train`` and never reads the parameter — the
    signature looks inconsistent with the implementation; confirm with callers.
    """
    predictions = list()
    # Raw history aligned with the training span, offset by the window size.
    d = raw_values[window_size:split + window_size + 1]
    for i in range(len(y_train)):
        yhat = y_predicted[i]
        # +1 accounts for the observation dropped when the series was diffed.
        yhat = data_misc.inverse_difference(d, yhat, len(y_train) + 1 - i)
        predictions.append(yhat)

    # Shifted by one: the first raw value has no differenced counterpart.
    d = raw_values[window_size + 1:split + window_size + 1]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
コード例 #4
0
ファイル: shuffle_test.py プロジェクト: davis-9fv/Project
def compare_test(y_test, y_predicted):
    """Invert differencing on test predictions and return (rmse, predictions).

    Args:
        y_test: scaled test targets (only its length is used).
        y_predicted: differenced predictions, one per test row.

    Returns:
        (rmse, predictions): RMSE against ``raw_values[split + 1:]`` and the
        inverted prediction list.
    """
    predictions = list()
    history = raw_values  # hoisted out of the loop: it never changes per i
    n = len(y_test)
    for i in range(n):
        yhat = y_predicted[i]
        # +1 accounts for the observation dropped by differencing.
        yhat = data_misc.inverse_difference(history, yhat, n + 1 - i)
        predictions.append(yhat)

    # split + 1: skip the raw value consumed when the data was differenced.
    d = raw_values[split + 1:]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
コード例 #5
0
def compare_train(len_y_train=0, y_predicted=None):
    """Invert scaling and differencing on training predictions; return RMSE.

    Args:
        len_y_train: number of training predictions to process.
        y_predicted: scaled, differenced predictions (defaults to empty).

    Returns:
        (rmse, predictions): RMSE against the raw training targets and the
        fully inverted prediction list.
    """
    # Fixed mutable-default-argument bug: a shared [] default persists across
    # calls; use None as sentinel instead (behaviorally identical for the
    # default case, since the loop body never runs when len_y_train == 0).
    if y_predicted is None:
        y_predicted = []
    predictions = list()
    d = avg_values[total_window_size:split + total_window_size + 1]
    for i in range(len_y_train):
        X = x_train[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        # +1 accounts for the observation dropped by differencing.
        yhat = data_misc.inverse_difference(d, yhat, len_y_train + 1 - i)
        predictions.append(yhat)

    # +1: skip the value consumed when the data was differenced.
    d = avg_values[total_window_size + 1:split + total_window_size + 1]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
コード例 #6
0
def compare_train(train_scaled, y_predicted):
    """Undo scaling and differencing on training predictions; return RMSE."""
    inverted = list()
    total = len(train_scaled)
    for idx in range(total):
        features = train_scaled[idx, 0:-1]
        prediction = y_predicted[idx]
        prediction = data_misc.invert_scale(scaler, features, prediction)
        # split + 1 keeps the slice consistent with the data used for the diff.
        prediction = data_misc.inverse_difference(
            raw_values[:split + 1], prediction, total + 1 - idx)
        inverted.append(prediction)
    # Start at index one: the first raw value is consumed by the diff.
    expected = raw_values[1:split + 1]
    return sqrt(mean_squared_error(expected, inverted))
コード例 #7
0
ファイル: data_validation.py プロジェクト: davis-9fv/Project
def compare_train(window_size, scaler, len_y_train, x_train, y_predicted=None):
    """Invert scaling and differencing on training predictions; return RMSE.

    Args:
        window_size: supervised-learning lag window used when building features.
        scaler: the scaler used to transform the training data.
        len_y_train: number of training predictions to process.
        x_train: scaled feature rows matching ``y_predicted``.
        y_predicted: scaled, differenced predictions (defaults to empty).

    Returns:
        (rmse, predictions): RMSE against the raw training targets and the
        fully inverted prediction list.
    """
    # Fixed mutable-default-argument bug: a shared [] default persists across
    # calls; None sentinel is behaviorally identical for the default case.
    if y_predicted is None:
        y_predicted = []
    predictions = list()
    d = dc.avg_values[window_size:dc.split_train_test + window_size + 1]
    for i in range(len_y_train):
        X = x_train[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        yhat = data_misc.inverse_difference(d, yhat, len_y_train + 1 - i)
        predictions.append(yhat)

    # the +1 represents the drop that we did when the data was diff
    d = dc.avg_values[window_size + 1:dc.split_train_test + window_size + 1]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
コード例 #8
0
def compare_test(test_scaled, y_predicted):
    """Undo scaling and differencing on test predictions; return RMSE.

    Args:
        test_scaled: scaled supervised test rows; last column is the target.
        y_predicted: scaled, differenced predictions, one per test row.

    Returns:
        (rmse, predictions): RMSE against the raw test targets and the fully
        inverted prediction list.
    """
    predictions = list()
    n = len(test_scaled)
    for i in range(n):
        # Only the feature columns are needed; the unused target unpack and
        # commented-out debug code were removed.
        X = test_scaled[i, 0:-1]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        # +1 accounts for the observation dropped by differencing.
        yhat = data_misc.inverse_difference(y_raw_supervised[split:], yhat, n + 1 - i)
        predictions.append(yhat)

    # Advance by one: the first raw value cannot be used because of the diff.
    d = raw_values[1 + split:]
    rmse = sqrt(mean_squared_error(d, predictions))

    return rmse, predictions
コード例 #9
0
def compare(y_test, y_predicted):
    """Invert scaling and differencing on test predictions; return RMSE.

    Args:
        y_test: scaled test targets (only its length is used).
        y_predicted: scaled, differenced predictions, one per test row.

    Returns:
        (rmse, predictions): RMSE against the raw test span and the inverted
        prediction list.
    """
    predictions = list()
    # Hoisted out of the loop: this slice does not depend on i. The unused
    # local ``X = x_test[i]`` was also removed.
    d = raw_values[upper_train:]
    n = len(y_test)
    for i in range(n):
        yhat = y_predicted[i]
        yhat = sc.inverse_transform(yhat)
        yhat = yhat.flatten()
        # +1 accounts for the observation dropped by differencing.
        yhat = data_misc.inverse_difference(d, yhat[0], n + 1 - i)
        predictions.append(yhat)

    # Raw values aligned with the test window.
    z = raw_values[upper_train:upper_train + testset_length]

    rmse = sqrt(mean_squared_error(z, predictions))
    return rmse, predictions
コード例 #10
0
def compare_test(len_y_test=0, y_predicted=None):
    """Invert scaling and differencing on test predictions; return RMSE.

    Args:
        len_y_test: number of test predictions to process.
        y_predicted: scaled, differenced predictions (defaults to empty).

    Returns:
        (rmse, predictions): RMSE against the raw test targets and the fully
        inverted prediction list.
    """
    # Fixed mutable-default-argument bug: a shared [] default persists across
    # calls; None sentinel is behaviorally identical for the default case.
    if y_predicted is None:
        y_predicted = []
    predictions = list()
    # Stationary: hoisted out of the loop — the history slice is invariant.
    d = avg_values[split + total_window_size - 1:]
    for i in range(len_y_test):
        X = x_test[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        yhat = data_misc.inverse_difference(d, yhat, len_y_test + 1 - i)
        predictions.append(yhat)

    # +1: skip the value consumed when the data was differenced.
    d = avg_values[split + total_window_size + 1:]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
コード例 #11
0
ファイル: data_validation.py プロジェクト: davis-9fv/Project
def compare_test(window_size, scaler, len_y_test, x_test, y_predicted=None):
    """Invert scaling and differencing on test predictions; return RMSE.

    Args:
        window_size: supervised-learning lag window used when building features.
        scaler: the scaler used to transform the test data.
        len_y_test: number of test predictions to process.
        x_test: scaled feature rows matching ``y_predicted``.
        y_predicted: scaled, differenced predictions (defaults to empty).

    Returns:
        (rmse, predictions): RMSE against the raw test targets and the fully
        inverted prediction list.
    """
    # Fixed mutable-default-argument bug: a shared [] default persists across
    # calls; None sentinel is behaviorally identical for the default case.
    if y_predicted is None:
        y_predicted = []
    predictions = list()
    # Stationary: hoisted out of the loop — the history slice is invariant.
    d = dc.avg_values[dc.split_train_test + window_size - 1:]
    for i in range(len_y_test):
        X = x_test[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        yhat = data_misc.inverse_difference(d, yhat, len_y_test + 1 - i)
        predictions.append(yhat)

    # the +1 represents the drop that we did when the data was diff
    d = dc.avg_values[dc.split_train_test + window_size + 1:]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
コード例 #12
0
def compare(y_raw_test, y_predicted):
    """Invert differencing two ways and report RMSE for both reconstructions.

    Args:
        y_raw_test: raw (un-differenced) test observations.
        y_predicted: differenced predictions to invert.

    Returns:
        (rmse, predictions): RMSE/predictions for the ``inverse_difference``
        path. The alternative ``inverse_difference2`` RMSE is only printed.
    """
    predictions = list()
    predictions2 = list()
    # Seed the running history with the training targets (last column).
    train_tmp = list()
    for i in range(len(train)):
        train_tmp.append(train[i, -1])

    sum_first_values = raw_values[2]
    for i in range(len(y_predicted)):
        yhat = y_predicted[i]
        # NOTE(review): the offset uses the global ``y_test``; equivalent to
        # len(y_predicted) when both have the same size — confirm with callers.
        yhat = data_misc.inverse_difference(raw_values, yhat,
                                            len(y_test) + 1 - i)
        predictions.append(yhat)

        # Alternative reconstruction from an accumulated history.
        a = data_misc.inverse_difference2(sum_first_values, train_tmp,
                                          y_predicted[i])
        train_tmp.append(y_predicted[i])
        predictions2.append(a)

    rmse = sqrt(mean_squared_error(y_raw_test, predictions))
    rmse2 = sqrt(mean_squared_error(y_raw_test, predictions2))
    # Bug fix: '%i' truncated the float RMSE to an integer; print it with the
    # same precision the sibling scripts use.
    print('predictions2: %.7f' % (rmse2))
    return rmse, predictions
コード例 #13
0
ファイル: data_validation.py プロジェクト: davis-9fv/Project
def compare_val(window_size, scaler, len_y_val, x_val, y_predicted=None):
    """Invert scaling and differencing on validation predictions; return RMSE.

    Args:
        window_size: supervised-learning lag window used when building features.
        scaler: the scaler used to transform the validation data.
        len_y_val: number of validation predictions to process.
        x_val: scaled feature rows matching ``y_predicted``.
        y_predicted: scaled, differenced predictions (defaults to empty).

    Returns:
        (rmse, predictions): RMSE against the raw validation targets and the
        fully inverted prediction list.
    """
    # Fixed mutable-default-argument bug: a shared [] default persists across
    # calls; None sentinel is behaviorally identical for the default case.
    if y_predicted is None:
        y_predicted = []
    predictions = list()
    # Stationary: hoisted out of the loop — the slice is invariant in i.
    # Boundary between train and val; the inversion walks d end-to-start.
    d = dc.avg_values[dc.split_train_val + window_size:dc.split_train_val + window_size + 1 + len_y_val]
    for i in range(len_y_val):
        X = x_val[i]
        yhat = y_predicted[i]
        yhat = data_misc.invert_scale(scaler, X, yhat)
        yhat = data_misc.inverse_difference(d, yhat, len_y_val + 1 - i)
        predictions.append(yhat)

    # the +1 represents the drop that we did when the data was diff
    d = dc.avg_values[dc.split_train_val + window_size + 1: dc.split_train_val + window_size + 1 + dc.split_val_test]
    rmse = sqrt(mean_squared_error(d, predictions))
    return rmse, predictions
コード例 #14
0
        # NOTE(review): this fragment starts inside an if/else within a loop
        # whose header is outside this view; ``i``, ``flag``, ``diff_values``,
        # ``scaler``, ``regr``, ``predictions`` and ``allList`` come from that
        # surrounding code.
        # First branch: rebuild the supervised frame so the newest observation
        # becomes the single test row.
        supervised = data_misc.timeseries_to_supervised(diff_values, 1)
        supervised_values = supervised.values
        train, test = supervised_values[0:-1], supervised_values[-1:]
        test_scaled = scaler.transform(test)
        X, y = test_scaled[0, 0:-1], test_scaled[0, -1]
    else:
        flag = True
        X, y = test_scaled[i, 0:-1], test_scaled[i, -1]

    # One-step forecast for the current row.
    yhat = regr.predict([X])
    print("Y_test: " + str(y) + " Yhat: " + str(yhat))

    yhat = data_misc.invert_scale(scaler, X, yhat)

    # Offset -1 - i walks backwards so it does not realign to where it started.
    yhat = data_misc.inverse_difference(raw_values, yhat, -1 - i)
    # store forecast
    predictions.append(yhat)
    allList.append(yhat)

    # df = DataFrame(raw_values)
    # print(df[0][i])
    # columns = [df[0][i] for i in range(0,df.size)]
    # columns.append(yhat)
    # Feed the forecast back as history for the next walk-forward step.
    raw_values = allList
    # print(columns)

# Score and plot the last 20 raw values against the forecasts.
rmse = sqrt(mean_squared_error(raw_values[-20:], predictions))
print('Test RMSE: %.7f' % (rmse))
misc.plot_line_graph('ElasticNet', raw_values[-20:], predictions)
misc.print_comparison_list('Raw', raw_values[-20:], predictions)
コード例 #15
0
# Evaluate a fitted linear model: predict, invert the transforms, report RMSE.
print(regr.intercept_)

y_predicted = regr.predict(X_test)

print('y_test: ')
print(y_test)
print('y_predicted: ')
print(y_predicted)

predictions = list()
for i in range(len(test_scaled)):
    # Unpack features and scaled target for the current test row.
    X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    yhat = y_predicted[i]
    print("Y_test: " + str(y) + " Yhat: " + str(yhat))

    yhat = data_misc.invert_scale(scaler, X, yhat)
    #print("yhat no scaled:" + str(yhat))
    # NOTE(review): sibling scripts use len(...) + 1 - i here; this one uses
    # len(...) - 0 - i, i.e. an offset one smaller — confirm it is intended.
    yhat = data_misc.inverse_difference(raw_values, yhat, len(test_scaled) -0 - i)
    #print("yhat no difference:" + str(yhat))
    # store forecast
    predictions.append(yhat)


# Score against the last 10 raw observations and plot the comparison.
rmse = sqrt(mean_squared_error(raw_values[-10:], predictions))
print('Test RMSE: %.7f' % (rmse))
misc.plot_line_graph('Test vs Predicted Data', raw_values[-10:], predictions)
misc.print_comparison_list('Raw', raw_values[-10:], predictions)

misc.plot_data_graph('All Data', raw_values)

misc.plot_data_graph('Training Data', raw_values[:-10])
コード例 #16
0
ファイル: Test.py プロジェクト: davis-9fv/Project
# Sanity check: difference a small series, split it, then reconstruct the
# test portion two different ways and print both results.
# data = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200]
data = [
    112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118, 115, 126, 141,
    135, 125, 149, 170
]
diff_size = 1
diff_values = data_misc.difference(data, diff_size)
diff_values = diff_values.values
size_diff_values = len(diff_values)
split = int(size_diff_values * 0.80)  # 80/20 train/test split
train, test = diff_values[0:split], diff_values[split:]

predictions = list()
predictions2 = list()
# Seed a running history with the training portion of the differenced series.
train_tmp = [value for value in train]

test_length = len(test)
for position, step in enumerate(test):
    # Reconstruction 1: index back into the original raw series.
    undone = data_misc.inverse_difference(data, step, test_length + 1 - position)
    # Reconstruction 2: accumulate from the first raw value and the history.
    rebuilt = data_misc.inverse_difference2(data[0], train_tmp, step)
    train_tmp.append(step)
    predictions.append(undone)
    predictions2.append(rebuilt)

print(predictions)
print(predictions2)
print('end')
コード例 #17
0
    # NOTE(review): this fragment is indented inside an outer loop (the
    # repetition counter ``r`` and ``lstm_model``, ``forecast_lstm``,
    # ``error_scores`` are defined outside this view).
    predictions = list()
    normal_y = list()
    for i in range(len(test_scaled)):
        # make one-step forecast
        x_train, y_train = test_scaled[i, 0:-1], test_scaled[i, -1]

        # Forecast for test
        y_predicted = forecast_lstm(lstm_model, 1, x_train)
        # invert scaling
        y_predicted = data_misc.invert_scale(scaler, x_train, y_predicted)
        y = data_misc.invert_scale(scaler, x_train, y_train)

        # invert differencing test
        # We get the history and we cut because we pair with the supervised that was cut before. It was full of zeroes
        d = raw_values[split + window_size - 1:]
        y_predicted = data_misc.inverse_difference(d, y_predicted,
                                                   len(test_scaled) + 1 - i)
        y = data_misc.inverse_difference(d, y, len(test_scaled) + 1 - i)

        # print(" Y_test: " + str(y) + " Yhat: " + str(yhat) + " yraw:" + str(raw_values[i + len(train) + 1]))
        # store forecast
        normal_y.append(y)
        predictions.append(y_predicted)

    # report performance
    # the +1 represents the drop that we did when the data was diff
    y_raw = raw_values[split + 1 + window_size:]
    test_rmse = sqrt(mean_squared_error(y_raw, predictions))
    print('%d) Test RMSE: %.3f' % (r + 1, test_rmse))
    # print(predictions)
    error_scores.append(test_rmse)
    # plot
コード例 #18
0
ファイル: Dummy.py プロジェクト: davis-9fv/Project
# split data into train and test-sets
train, test = supervised_values[0:split], supervised_values[split:]

# transform the scale of the data
scaler, train_scaled, test_scaled = data_misc.scale(train, test)

x_train, y_train = train_scaled[:, 0:-1], train_scaled[:, -1]
x_test, y_test = test_scaled[:, 0:-1], test_scaled[:, -1]

# Dummy "model": echo the inputs as the predictions (persistence baseline —
# NOTE(review): this assigns the feature rows, not y_test; confirm intended).
y_predicted = x_test

predictions = list()
for i in range(len(x_test)):
    X, y = x_test[i], y_test[i]
    yhat = y_predicted[i]
    # print("Y_test: " + str(y) + " Yhat: " + str(yhat))

    yhat = data_misc.invert_scale(scaler, X, yhat)
    # print("yhat no scaled:" + str(yhat))

    # +1 accounts for the observation dropped when the series was differenced.
    yhat = data_misc.inverse_difference(raw_values, yhat, len(x_test) + 1 - i)
    # print("yhat no difference:" + str(yhat))
    # store forecast

    predictions.append(yhat)

# Raw test targets start one past the split because of the diff.
d = raw_values[split + 1:]

rmse = sqrt(mean_squared_error(d, predictions))
print('Test RMSE: %.7f' % (rmse))
コード例 #19
0
# Toy series used to exercise the supervised-frame building and the
# difference/inverse-difference round trip.
avg = [10, 22, 30, 42, 50, 62, 70, 82, 90, 102]
btc = [110, 123, 130, 143, 150, 163, 170, 183, 190, 203]
trend = [212, 224, 232, 244, 252, 264, 272, 284, 292, 304]

df = DataFrame({'btc': btc,
                'trend': trend})
print(btc)

# Difference the avg series and turn it into a lagged supervised frame.
avg_diff = data_misc.difference(avg, 1)
avg_supervised = data_misc.timeseries_to_supervised(avg_diff, avg_window_size)
print(avg_supervised)

btc_supervised = supervised_diff_dt(df, btc_window_size)

# We pair the avg_supervised column with the weight_supervised
# Trim both frames from the start by the larger of the two windows so the
# rows line up.
cut_beginning = [avg_window_size, btc_window_size]
cut_beginning = max(cut_beginning)

avg_supervised = avg_supervised.values[cut_beginning:, :]
btc_supervised = btc_supervised[cut_beginning:, :]

# Concatenate with numpy
supervised = np.concatenate((btc_supervised, avg_supervised), axis=1)

print(supervised)
print("No diff")
# Round-trip check: undo the differencing element by element.
for i in range(len(avg_diff)):
    # +1 accounts for the observation dropped by differencing.
    avg_un_diff = data_misc.inverse_difference(avg, avg_diff[i], len(avg_diff) + 1 - i)
    print(avg_un_diff)
print("End")