def run_ESRNN():
    import torch
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    path_daily = r'C:\Users\xxxli\Desktop\Daily'
    dic_daily = preprocess.read_file(path_daily)
    series_list = []
    for k, v in dic_daily.items():
        ticker_name = k
        df, cat = v
        df = preprocess.single_price(df, ticker_name)  # column = [ticker]
        series_list.append(DataSeries(cat, 'daily', df))
    collect = DataCollection('universe daily', series_list)
    train_dc, test_dc = collect.split(numTest=24)

    m = ModelESRNN(max_epochs=15, batch_size=32, dilations=[[1, 3], [7, 14]],
                   input_size=12, output_size=24, device=device)
    m.train(train_dc)
    y_test = m.predict(test_dc)
    y_test_df = y_test.to_df()
    y_test_df.to_csv('hyper_ESRNN_1.csv')
def test_MP_class(self):
    import torch
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    path_monthly = os.path.join('test', 'Data', 'Monthly')
    dic_monthly = DP.read_file(path_monthly)

    n_assets = 1
    time_series_group = []
    for i in range(n_assets):
        df = dic_monthly[list(dic_monthly.keys())[i]]
        ds = DataSeries('ETF', 'monthly', df[0])
        time_series_group.append(ds)
    input_dc = DataCollection('test1', time_series_group)

    m = ModelESRNN(seasonality=[12], input_size=4, output_size=12, device=device)
    train_dc, test_dc = input_dc.split(numTest=12)
    m.train(train_dc)
    forecast_dc = m.predict(test_dc)
    # train_dc.to_df().to_csv('insample.csv')
    test_dc.to_df().to_csv('test.csv')
    # forecast_dc.to_df().to_csv('forecast.csv')

    mn = MN.ModelNaive2(2, train_dc)
    naive2_dc = mn.fit_and_generate_prediction(12, 'MS')
    naive2_dc.to_df().to_csv('naive.csv')

    mp = MP.ModelPerformance("test model performance", 2, test_dc,
                             forecast_dc, train_dc, naive2_dc)

    mase = MP.MASE(test_dc.to_df(), forecast_dc.to_df(), train_dc.to_df(), 2)
    smape = MP.sMAPE(test_dc.to_df(), forecast_dc.to_df())
    mape = MP.MAPE(mp.y_df, mp.y_hat_df)
    r2 = MP.R2(test_dc.to_df(), forecast_dc.to_df())
    rmse = MP.RMSE(test_dc.to_df(), forecast_dc.to_df())
    owa = MP.OWA(test_dc.to_df(), forecast_dc.to_df(),
                 train_dc.to_df(), naive2_dc.to_df(), 2)
    u1 = MP.Theil_U1(test_dc.to_df(), forecast_dc.to_df())
    u2 = MP.Theil_U2(test_dc.to_df(), forecast_dc.to_df())

    mp.MASE()
    mp.sMAPE()
    mp.MAPE()
    mp.R2()
    mp.RMSE()
    mp.OWA()
    mp.Theil_U1()
    mp.Theil_U2()

    self.assertAlmostEqual(mp.metrics['sMAPE'], smape)
    self.assertAlmostEqual(mp.metrics['MAPE'], mape)
    self.assertAlmostEqual(mp.metrics['R2'], r2)
    self.assertAlmostEqual(mp.metrics['RMSE'], rmse)
    self.assertAlmostEqual(mp.metrics['MASE'], mase)
    self.assertAlmostEqual(mp.metrics['OWA'], owa)
    self.assertAlmostEqual(mp.metrics['Theil_U1'], u1)
    self.assertAlmostEqual(mp.metrics['Theil_U2'], u2)
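# For reference, a minimal sketch of the two headline metrics asserted above,
# written against the standard M4-competition definitions. These plain-NumPy
# helpers are illustrative assumptions, not the MP module's actual code.
import numpy as np

def smape_sketch(y, y_hat):
    # symmetric MAPE: mean of 2|y - y_hat| / (|y| + |y_hat|), in percent
    y, y_hat = np.asarray(y, float), np.asarray(y_hat, float)
    return 100.0 * np.mean(2.0 * np.abs(y - y_hat) / (np.abs(y) + np.abs(y_hat)))

def mase_sketch(y, y_hat, y_train, m):
    # MASE: out-of-sample MAE scaled by the in-sample MAE of the
    # seasonal-naive forecast with period m (m = 2 in the test above)
    y, y_hat, y_train = (np.asarray(v, float) for v in (y, y_hat, y_train))
    scale = np.mean(np.abs(y_train[m:] - y_train[:-m]))
    return np.mean(np.abs(y - y_hat)) / scale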
def test_Naive2(self):
    path_monthly = os.path.join('test', 'Data', 'Monthly')
    dic_monthly = preprocess.read_file(path_monthly)
    series_list = []
    for k, v in dic_monthly.items():
        df, cat = v
        df = preprocess.single_price(df, k)
        series_list.append(DataSeries(cat, 'monthly', df))
    collect = DataCollection('test1', series_list)
    train_dc, test_dc = collect.split(numTest=12)

    m = ModelNaive2(12, train_dc, test_dc)
    y_hat_Naive2_dc = m.fit_and_generate_prediction(12, freq='MS')
    y_hat_Naive2_dc.to_df().to_csv('test_Naive2_result.csv')
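# For context, Naive2 is the M4 benchmark that seasonally adjusts a series,
# repeats its last adjusted level over the horizon, and restores the seasonal
# pattern. A simplified pandas sketch of that idea (the real Naive2 also runs
# a seasonality test and uses a centered moving-average decomposition; this
# helper is an assumption, not the ModelNaive2 class's code):
import numpy as np
import pandas as pd

def naive2_sketch(y: pd.Series, seasonality: int, horizon: int) -> np.ndarray:
    n = len(y)
    pos = np.arange(n) % seasonality
    # multiplicative seasonal indices: mean level at each cycle position
    seas = y.groupby(pos).mean()
    seas = seas / seas.mean()
    deseasonalized = y.to_numpy() / seas.loc[pos].to_numpy()
    level = deseasonalized[-1]  # naive step on the adjusted series
    future_pos = np.arange(n, n + horizon) % seasonality
    return level * seas.loc[future_pos].to_numpy()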
def test_ESRNN(self):
    # An example of how to use ESRNN
    import torch
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    path_daily = os.path.join('test', 'Data', 'daily')
    dic_daily = preprocess.read_file(path_daily)
    series_list = []
    for k, v in dic_daily.items():
        df, cat = v
        df = preprocess.single_price(df, k)
        series_list.append(DataSeries(cat, 'daily', df))
    collect = DataCollection('test1', series_list)

    m = ModelESRNN(max_epochs=5, seasonality=[], batch_size=64,
                   input_size=12, output_size=12, device=device)
    train_dc, test_dc = collect.split(numTest=12)
    m.train(train_dc)
    y_test = m.predict(test_dc)
    assert isinstance(y_test, DataCollection)
    y_test_df = y_test.to_df()
    y_test_df.to_csv('predict_result.csv')
def validation_rolling(input_dc: DataCollection, num_split: int, numTest: int,
                       max_epochs=15, batch_size=1, batch_size_test=128,
                       freq_of_test=-1, learning_rate=1e-3,
                       lr_scheduler_step_size=9, lr_decay=0.9,
                       per_series_lr_multip=1.0, gradient_eps=1e-8,
                       gradient_clipping_threshold=20, rnn_weight_decay=0,
                       noise_std=0.001, level_variability_penalty=80,
                       testing_percentile=50, training_percentile=50,
                       ensemble=False, cell_type='LSTM', state_hsize=40,
                       dilations=[[1, 2], [4, 8]], add_nl_layer=False,
                       seasonality=[4], input_size=4, output_size=8,
                       frequency=None, max_periods=20, random_seed=1):
    import time
    scores_list = []
    train_val_dic = {}
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Carve the collection into num_split successive train/validation pairs,
    # peeling numTest observations off the end each time.
    for i in range(num_split):
        train, validation = input_dc.split(numTest=numTest)
        train_val_dic[i] = [train, validation]
        input_dc = train

    # Walk the splits from the earliest window forward, recording the error
    # of each refit.
    total_score = 0
    elapse = 0
    for i in range(num_split - 1, -1, -1):
        train_dc = train_val_dic[i][0]
        validation_dc = train_val_dic[i][1]
        validation_df = validation_dc.to_df()
        start_time = time.time()
        m = ModelESRNN(max_epochs=max_epochs, batch_size=batch_size,
                       batch_size_test=batch_size_test, freq_of_test=freq_of_test,
                       learning_rate=learning_rate,
                       lr_scheduler_step_size=lr_scheduler_step_size,
                       lr_decay=lr_decay, per_series_lr_multip=per_series_lr_multip,
                       gradient_eps=gradient_eps,
                       gradient_clipping_threshold=gradient_clipping_threshold,
                       rnn_weight_decay=rnn_weight_decay, noise_std=noise_std,
                       level_variability_penalty=level_variability_penalty,
                       testing_percentile=testing_percentile,
                       training_percentile=training_percentile,
                       ensemble=ensemble, cell_type=cell_type,
                       state_hsize=state_hsize, dilations=dilations,
                       add_nl_layer=add_nl_layer, seasonality=seasonality,
                       input_size=input_size, output_size=output_size,
                       frequency=frequency, max_periods=max_periods,
                       random_seed=random_seed, device=device)
        m.train(train_dc)
        y_predict = m.predict(validation_dc)
        y_predict_df = y_predict.to_df()
        score = MP.MAPE(validation_df, y_predict_df)
        elapse += time.time() - start_time
        scores_list.append(score)
        total_score += score

    score = total_score / num_split
    return score, scores_list, elapse / num_split, (max_epochs, batch_size, input_size, output_size)
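# Usage sketch for validation_rolling (illustrative): universe_dc stands in
# for a daily DataCollection built via preprocess.read_file, as in run_ESRNN
# above. With num_split=3 and numTest=24, three successive 24-step validation
# windows are peeled off the end of the collection, the model is refit on each
# remaining history, and the returned score averages the MAPE over the refits.
avg_mape, split_mapes, avg_seconds, config = validation_rolling(
    universe_dc, num_split=3, numTest=24,
    max_epochs=15, batch_size=32,
    dilations=[[1, 3], [7, 14]], seasonality=[5],
    input_size=12, output_size=24, frequency='D')
print(config, avg_mape, split_mapes, avg_seconds)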
def validation_simple(input_dc: DataCollection, numTest: int,
                      max_epochs=15, batch_size=1, batch_size_test=128,
                      freq_of_test=-1, learning_rate=1e-3,
                      lr_scheduler_step_size=9, lr_decay=0.9,
                      per_series_lr_multip=1.0, gradient_eps=1e-8,
                      gradient_clipping_threshold=20, rnn_weight_decay=0,
                      noise_std=0.001, level_variability_penalty=80,
                      testing_percentile=50, training_percentile=50,
                      ensemble=False, cell_type='LSTM', state_hsize=40,
                      dilations=[[1, 2], [4, 8]], add_nl_layer=False,
                      seasonality=[4], input_size=4, output_size=8,
                      frequency=None, max_periods=20, random_seed=1):
    train_dc, validation_dc = input_dc.split(numTest=numTest)
    validation_df = validation_dc.to_df()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    m = ModelESRNN(max_epochs=max_epochs, batch_size=batch_size,
                   batch_size_test=batch_size_test, freq_of_test=freq_of_test,
                   learning_rate=learning_rate,
                   lr_scheduler_step_size=lr_scheduler_step_size,
                   lr_decay=lr_decay, per_series_lr_multip=per_series_lr_multip,
                   gradient_eps=gradient_eps,
                   gradient_clipping_threshold=gradient_clipping_threshold,
                   rnn_weight_decay=rnn_weight_decay, noise_std=noise_std,
                   level_variability_penalty=level_variability_penalty,
                   testing_percentile=testing_percentile,
                   training_percentile=training_percentile,
                   ensemble=ensemble, cell_type=cell_type,
                   state_hsize=state_hsize, dilations=dilations,
                   add_nl_layer=add_nl_layer, seasonality=seasonality,
                   input_size=input_size, output_size=output_size,
                   frequency=frequency, max_periods=max_periods,
                   random_seed=random_seed, device=device)
    m.train(train_dc)
    y_predict = m.predict(validation_dc)
    y_predict_df = y_predict.to_df()
    score = MP.MAPE(validation_df, y_predict_df)
    return score, (max_epochs, batch_size, input_size, output_size)
noise_std = 0.001
level_variability_penalty = 80
state_hsize = 40
dilation = [[1]]
add_nl_layer = False
seasonality = [5]

# action
path = os.path.join('test', 'Data', 'Daily')
dic = preprocess.read_file(path)
series_list = []
for k, v in dic.items():
    df, cat = v
    df = preprocess.single_price(df, k)
    series_list.append(DataSeries(cat, 'daily', df))
collect = DataCollection('RollingValidation', series_list)
input_dc, _ = collect.split(numTest=2 * numTest)

score, _ = validation_simple(
    input_dc, numTest=numTest, max_epochs=max_epochs,
    batch_size=batch_size, learning_rate=learning_rate,
    lr_scheduler_step_size=lr_scheduler_step_size, lr_decay=lr_decay,
    noise_std=noise_std,
    level_variability_penalty=level_variability_penalty,
    state_hsize=state_hsize, dilations=dilation,
    add_nl_layer=add_nl_layer, seasonality=seasonality,
    # input_size / output_size assumed defined earlier in the script,
    # like numTest, max_epochs, and the other hyperparameters referenced here
    input_size=input_size, output_size=output_size,
)
# Candidate dilation structures to grid-search. The list's opening is cut off
# in the excerpt, so the variable name dil_grid is assumed.
dil_grid = [
    [[1], [5, 10]], [[1], [5, 20]],
    [[1], [3, 5, 10]], [[1], [3, 5, 20]], [[1], [5, 10, 20]],
    [[1, 3], [5]], [[1, 5], [10]], [[1, 5], [20]],
    [[1, 3], [5, 10]], [[1, 3, 5], [10]],
    [[1, 3], [5, 20]], [[1, 3, 5], [20]],
    [[1, 5], [10, 20]], [[1, 5, 10], [20]],
]
dil_one = [[[1, 5]]]

# train/test split
input_dc, test_dc = collect.split(numTest=2 * 90)

score_list = []
time_list = []
import time
for dn in dil_one:
    start_time = time.time()
    score, _ = Vn.validation_simple(input_dc, numTest=90, max_epochs=15,
                                    batch_size=64, learning_rate=1e-3,
                                    lr_scheduler_step_size=9, lr_decay=0.9,
                                    noise_std=0.001, level_variability_penalty=80,
                                    state_hsize=40, dilations=dn,
                                    add_nl_layer=False, seasonality=[5],
                                    input_size=5, output_size=90,
                                    frequency='D', random_seed=1)
    score_list.append(score)
    time_list.append(time.time() - start_time)
    print("--- dilation :", dn, " ---")
    print("--- score of this config is %s ---" % score)
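# Once every candidate has been scored (iterating over dil_grid rather than
# the single-entry dil_one), the winning dilation structure is simply the
# argmin of the recorded MAPEs, as in this small selection sketch:
best_score, best_dil = min(zip(score_list, dil_one), key=lambda t: t[0])
print("--- best dilation :", best_dil, "with score %s ---" % best_score)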
class Test_Data(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.a = pd.DataFrame([10.2, 12, 32.1, 9.32], columns=['fakeSPY'],
                              index=pd.to_datetime(['2020-01-01', '2020-02-01',
                                                    '2020-03-01', '2020-04-01']))
        self.a_series = DataSeries('ETF', 'monthly', self.a)
        self.b = pd.DataFrame([2.3, 3.6, 4.5], columns=['fakeTreasury'],
                              index=pd.to_datetime(['2019-12-12', '2020-02-05',
                                                    '2020-09-13']))
        self.b_series = DataSeries('Bond', 'monthly', self.b)
        self.c_collection = DataCollection('trial', [self.a_series, self.b_series])

        # For test_the_rest_of_entire_dataset():
        self.a_entire = pd.DataFrame([10.2, 12, 32.1, 9.32, 11.5, 9.7],
                                     columns=['fakeSPY'],
                                     index=pd.to_datetime(['2020-01-01', '2020-02-01',
                                                           '2020-03-01', '2020-04-01',
                                                           '2020-05-01', '2020-06-01']))
        self.a_series_entire = DataSeries('ETF', 'monthly', self.a_entire)
        self.b_entire = pd.DataFrame([2.3, 3.6, 4.5, 5.5],
                                     columns=['fakeTreasury'],
                                     index=pd.to_datetime(['2019-12-12', '2020-02-05',
                                                           '2020-09-13', '2020-10-13']))
        self.b_series_entire = DataSeries('Bond', 'monthly', self.b_entire)
        self.c_collection_entire = DataCollection(
            'trial', [self.a_series_entire, self.b_series_entire])

        self.a_exp = pd.DataFrame([11.5, 9.7], columns=['fakeSPY'],
                                  index=pd.to_datetime(['2020-05-01', '2020-06-01']))
        self.a_series_exp = DataSeries('ETF', 'monthly', self.a_exp)
        self.b_exp = pd.DataFrame([5.5], columns=['fakeTreasury'],
                                  index=pd.to_datetime(['2020-10-13']))
        self.b_series_exp = DataSeries('Bond', 'monthly', self.b_exp)
        self.c_collection_exp = DataCollection(
            'trial', [self.a_series_exp, self.b_series_exp])

    def test_DataSeries_basic(self):
        a = self.a
        a_series = self.a_series
        assert len(a_series) == 4
        assert str(a_series) == 'monthly fakeSPY'
        assert a_series.get_ticker() == 'fakeSPY'
        assert a_series.get_category() == 'ETF'
        assert a_series.get_freq() == 'monthly'
        assert a.equals(a_series.get_ts())
        # test deep copy
        a_copy = a_series.copy()
        assert a_copy != a_series and a_copy.get_ts().equals(a_series.get_ts())
        assert isinstance(a_series.to_Series(), pd.Series)

    def test_DataSeries_add_sub(self):
        diff = self.a_series_entire - self.a_series
        assert self.compareSeries(diff, self.a_series_exp)
        a_plus = diff + self.a_series
        assert self.compareSeries(a_plus, self.a_series_entire)

    def test_DataSeries_to_list(self):
        lst = self.a_series.to_list()
        assert lst == [10.2, 12, 32.1, 9.32]

    def test_last_index(self):
        assert self.a_series.get_last_date() == pd.to_datetime('2020-04-01')

    def test_DataSeries_split_and_trim(self):
        # test split
        a_train, a_test = self.a_series.split(pct=0.75)
        assert isinstance(a_train, DataSeries)
        assert isinstance(a_test, DataSeries)
        assert len(a_train) == 3
        assert len(a_test) == 1
        assert self.a.iloc[:3].equals(a_train.get_ts())
        assert self.a.iloc[3:].equals(a_test.get_ts())
        # test trim
        trimmed = self.a_series.trim('2020-02-01', '2020-03-01')
        assert len(trimmed) == 2
        assert self.a.loc['2020-02-01':'2020-03-01'].equals(trimmed.get_ts())

    @staticmethod
    def compareSeries(a, b):
        flag = True
        if not isinstance(a, DataSeries):
            print("\n The first item is not a DataSeries object")
            return False
        if not isinstance(b, DataSeries):
            print("\n The second item is not a DataSeries object")
            return False
        if a == b:
            print("\n The two items are the same object")
            flag = False
        if len(a) != len(b):
            print("\n The two items do not have the same length")
            flag = False
        if str(a) != str(b):
            print("\n The two items do not have the same ticker")
            flag = False
        if a.get_category() != b.get_category():
            print("\n The two items do not have the same category")
            flag = False
        if not a.get_ts().equals(b.get_ts()):
            print("\n The two items do not have the same time series")
            flag = False
        if not a.get_freq() == b.get_freq():
            print("\n The two items do not have the same frequency")
            flag = False
        return flag

    def test_DataCollection_basic(self):
        assert len(self.c_collection) == 2
        assert self.c_collection.get_freq() == 'monthly'
        for item, compare in zip(self.c_collection, [self.a_series, self.b_series]):
            assert self.compareSeries(item, compare)

    def test_DataCollection_add_sub(self):
        res = self.c_collection_entire - self.c_collection
        expected = self.c_collection_exp
        for r, e in zip(res, expected):
            assert self.compareSeries(r, e)
        res_plus = res + self.c_collection
        for r, e in zip(res_plus, self.c_collection_entire):
            assert self.compareSeries(r, e)

    def test_DataCollection_get_series(self):
        item1 = self.c_collection[1]
        assert self.compareSeries(item1, self.b_series)
        item2 = self.c_collection.get_series('fakeSPY')
        assert self.compareSeries(item2, self.a_series)

    def test_DataCollection_copy(self):
        c = self.c_collection.copy()
        assert c != self.c_collection
        assert c.label == self.c_collection.label
        assert c.get_freq() == self.c_collection.get_freq()
        for one, two in zip(c, self.c_collection):
            assert self.compareSeries(one, two)

    def test_DataCollection_summary(self):
        pass

    def test_DataCollection_split(self):
        train, test = self.c_collection.split(pct=0.75)
        assert str(train) == 'trial'
        assert train.freq == 'monthly'
        assert str(test) == 'trial'
        assert test.freq == 'monthly'
        compare = [self.a_series.split(0.75), self.b_series.split(0.75)]
        compare_train, compare_test = zip(*compare)
        train_col, test_col = list(compare_train), list(compare_test)
        for i, item in enumerate(train):
            assert self.compareSeries(item, train_col[i])
        for i, item in enumerate(test):
            assert self.compareSeries(item, test_col[i])

    def test_DataCollection_list(self):
        assert self.c_collection.ticker_list() == ['fakeSPY', 'fakeTreasury']
        assert self.c_collection.category_list() == ['ETF', 'Bond']
        assert self.c_collection.last_date_list() == pd.to_datetime(
            ['2020-04-01', '2020-09-13']).to_list()
        assert self.c_collection.to_list() == [[10.2, 12, 32.1, 9.32],
                                               [2.3, 3.6, 4.5]]

    def test_DataCollection_add(self):
        d = pd.DataFrame([11, 22], columns=['fakeZZZ'],
                         index=pd.to_datetime(['2019-01-12', '2019-02-05']))
        d_series = DataSeries('Bond', 'monthly', d)
        c_plus = self.c_collection.copy()
        c_plus.add(d_series)
        compare = [self.a_series, self.b_series, d_series]
        for i, item in enumerate(c_plus):
            assert self.compareSeries(item, compare[i])

    def test_DataCollection_df(self):
        df = self.c_collection.to_df()
        compare = pd.concat([self.a, self.b], axis=1)
        assert df.equals(compare)

    def test_price_to_return(self):
        pass
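# test_price_to_return is left as a stub above; the transformation it would
# exercise is the one-period simple return r_t = p_t / p_{t-1} - 1. A
# standalone pandas sketch of that behavior (an assumption about the intended
# helper, not the library's actual implementation):
import pandas as pd

def price_to_return_sketch(prices: pd.DataFrame) -> pd.DataFrame:
    # one-period simple returns; the all-NaN first row is dropped
    return prices.pct_change().dropna(how='all')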