Example #1
0
        loss_file = '{}/{}.csv'.format(csv_dir, loss_file_name)

        return [y_pred_file, ts_plot_path, loss_file, loss_plot_path]

    else:

        return [y_pred_file, ts_plot_path]

# Model options: 'VAR', 'RNN', 'R2N2', 'Baseline', 'Perfect'
model = 'Perfect'
y_pred_csv_name = 'train_y'
plot_suffix = 'train'
# No loss CSV for this model; set to a file name to enable loss plotting below.
loss_file_name = None

# Data-split options: 'train', 'cross_val', or 'test'.
# Renamed from `set` to avoid shadowing the builtin `set` type.
data_set = 'train'

# Resolve the output file paths for this model/run.
file_list = get_file_names(model=model, y_pred_csv_name=y_pred_csv_name,
                           plot_suffix=plot_suffix, loss_file_name=loss_file_name)

# eval_model's keyword is still named `set`; only our local variable is renamed.
check_model = eval_model(y_pred_file=file_list[0], set=data_set)

check_model.backtest(printer=True)

check_model.plot_backtest(file_path=file_list[1])

# Loss paths (file_list[2], file_list[3]) only exist when a loss file was requested.
if loss_file_name is not None:
    check_model.plot_loss(loss_csv_path=file_list[2], loss_plot_path=file_list[3])

check_model.accuracy(printer=True)
# Fit a first-order vector autoregression on the training features.
VAR_model = VAR(X_train_df)
results = VAR_model.fit(1)

# Produce predictions + AUC/MSE for both the train and cross-validation splits.
Y_train_pred_df, train_auc, train_mse = fit_VAR(results, 'train')
Y_dev_pred_df, dev_auc, dev_mse = fit_VAR(results, 'cross_val')

model_name = 'VAR_int'

# Run the evaluation class on the training predictions vs. actuals.
check_model = eval_model(y_pred_df=Y_train_pred_df, y_actual_df=Y_train_df)
check_model.backtest(printer=False)
check_model.accuracy(printer=False)

# Collect the train-split evaluation outputs.
train_metrics_dict = check_model.metrics
train_acc_score = check_model.accuracy_score
train_conf_list = check_model.confusion_matrix

# do for both train and dev set metrics

# run eval class
Example #3
0
import numpy as np
import os
import pandas as pd
from Data.scripts.data import data
from sklearn.metrics import mean_squared_error, roc_auc_score, roc_curve, auc
from Models.Evaluation.eval import eval_model

_, Y_actual = data.import_data(set='test')

# Naive persistence baseline: predict each step with the previous actual value.
y_pred_df = Y_actual.shift(1)
y_pred_df = y_pred_df.dropna()

# Align actuals with the (shortened) prediction index after the shift/dropna.
Y_actual = Y_actual.loc[y_pred_df.index]

tester = eval_model(y_pred_df=y_pred_df, y_actual_df=Y_actual)
tester.backtest(printer=False)

out_dict = tester.metrics

# DataFrame.as_matrix() was removed in pandas 1.0 — use .to_numpy() instead.
out_dict['mse'] = mean_squared_error(Y_actual.to_numpy(),
                                     y_pred_df.to_numpy())

# Map predictions into [0, 1] scores and actuals into binary up/down labels
# so they can be fed to roc_auc_score.
flat_pred = np.clip(y_pred_df.to_numpy().flatten() + 0.5, 0, 1)
flat_actual = np.where(Y_actual.to_numpy().flatten() > 0, 1, 0)

out_dict['auc'] = roc_auc_score(flat_actual, flat_pred)

model = 'Test'
print(out_dict)
Example #4
0
import torch
from torch.autograd import Variable

from Data.scripts.data import data
from Models.Extra.scripts.RNN import RNN
from Models.Evaluation.eval import eval_model
from statsmodels.tsa.api import VAR
from sklearn.metrics import mean_squared_error, roc_auc_score, roc_curve, auc

X_test, Y_test = data.import_data(set='test')

# Contrarian persistence baseline: predict the negation of the previous actual.
Y_pred_df = (Y_test.shift(1) * (-1)).dropna()

# Restrict the actuals to the rows that survived the shift/dropna.
Y_test = Y_test.loc[Y_pred_df.index]

# Evaluate the baseline predictions against the aligned actuals.
check_model = eval_model(y_pred_df=Y_pred_df, y_actual_df=Y_test)
check_model.backtest(printer=False)
check_model.accuracy(printer=False)

# Collect the dev-split evaluation outputs.
dev_metrics_dict = check_model.metrics
dev_acc_score = check_model.accuracy_score
dev_conf_list = check_model.confusion_matrix

print('Dev')
print(dev_metrics_dict)
print('Acc: {}'.format(dev_acc_score))
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_squared_error, roc_auc_score, roc_curve, auc
from statsmodels.tsa.api import VAR
from torch.autograd import Variable

from Data.scripts.data import data
from Models.Evaluation.eval import eval_model
from Models.Extra.scripts.RNN import RNN

X, Y = data.import_data(set='test')

# Load previously-saved VAR predictions for the test set.
# NOTE(review): hard-coded absolute local path — consider making it configurable.
Y_pred = pd.read_csv(
    '/Users/ianshaw/Downloads/GitHub/deepcrypto/Models/VAR/predicted_values_VAR_test_shifted.csv',
    index_col=0)

# Evaluate the stored predictions against the actuals.
check_model = eval_model(y_pred_df=Y_pred, y_actual_df=Y)
check_model.backtest(printer=False)
check_model.accuracy(printer=False)

# Plot the backtested strategy series.
check_model.strat_series.plot()
plt.show()

# Collect the evaluation outputs.
train_metrics_dict = check_model.metrics
train_acc_score = check_model.accuracy_score
train_conf_list = check_model.confusion_matrix

print('Train')