def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Generate random monthly sample data for four fake assets.
    np.random.seed(123)
    n_assets = 4
    time_series_group = []
    for i in range(n_assets):
        rows, cols = 1000, 1
        data = np.random.rand(rows, cols)  # swap in other random generators for constrained values
        # freq='MS' gives month-start dates; use 'T' for minutes, and so on.
        tidx = pd.date_range('2019-01-01', periods=rows, freq='MS')
        ID = 'FakeStock_' + str(i + 1)
        df = pd.DataFrame(data, columns=[ID], index=tidx)
        ds = DataSeries(category='Stock', freq='monthly', time_series=df)
        time_series_group.append(ds)
    input_dc_test = DataCollection(label='Test Collection',
                                   time_series_group=time_series_group)
    self.input_dc = input_dc_test

    # For the exception test: append daily series so the second
    # collection mixes frequencies.
    for i in range(2):
        rows, cols = 1000, 1
        data = np.random.rand(rows, cols)
        tidx = pd.date_range('2019-01-01', periods=rows, freq='D')
        ID = 'FakeStock_Daily_' + str(i + 1)
        df = pd.DataFrame(data, columns=[ID], index=tidx)
        ds = DataSeries(category='Stock', freq='daily', time_series=df)
        time_series_group.append(ds)
    input_dc_test_2 = DataCollection(label='Test Collection 2',
                                     time_series_group=time_series_group)
    self.input_dc_2 = input_dc_test_2
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    a = pd.DataFrame([10.2, 12, 32.1, 9.32], columns=['ABC'],
                     index=pd.to_datetime(['2020-01-01', '2020-02-01',
                                           '2020-03-01', '2020-04-01']))
    a.index.name = 'Date'
    self.a_series = DataSeries('ETF', 'monthly', a)
    b = pd.DataFrame([2.3, 3.6, 4.5], columns=['KKK'],
                     index=pd.to_datetime(['2020-01-01', '2020-02-01',
                                           '2020-03-01']))
    b.index.name = 'Date'
    self.b_series = DataSeries('Bond', 'monthly', b)
    self.collect = DataCollection('trial', [self.a_series, self.b_series])
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Fake daily data by ZZ.
    self.a_series = DataSeries(
        'ETF', 'daily',
        pd.DataFrame([10.0, 15.0, 20.0, 30.0], columns=['ABC'],
                     index=pd.to_datetime(['2020-01-01', '2020-01-02',
                                           '2020-01-03', '2020-01-04'])))
    self.b_series = DataSeries(
        'Bond', 'daily',
        pd.DataFrame([1.0, 3.5, 4.5], columns=['KKK'],
                     index=pd.to_datetime(['2020-01-01', '2020-01-02',
                                           '2020-01-03'])))
    self.collect = DataCollection('trial', [self.a_series, self.b_series])

    d = {'Initial weights': [0.6, 0.4]}
    self.weights = pd.DataFrame(data=d).T
    self.weights = self.weights.rename(columns={0: 'ABC', 1: 'KKK'})
    self.p = port.EqualPort("test equal port")
    self.p.calculate_initial_weight(self.collect)

    # Monthly
    path_monthly = os.path.join('test', 'Data', 'Monthly')
    dic_monthly = DataPreprocessing.read_file(path_monthly)
    n_assets = 4
    time_series_group = []
    for i in range(n_assets):
        df = dic_monthly[list(dic_monthly.keys())[i]]
        ds = DataSeries(df[1], 'monthly', df[0])
        time_series_group.append(ds)
    input_dc_test = DataCollection(label='Test Collection',
                                   time_series_group=time_series_group)
    self.input_dc = input_dc_test
    self.input_freq = input_dc_test.get_freq()
    self.input_df = self.input_dc.to_df()
    self.n_asset = len(self.input_df.columns)
    input_weights = [[1 / self.n_asset] * self.n_asset]
    input_weights_df = pd.DataFrame(input_weights,
                                    columns=self.input_df.columns,
                                    index=['Initial weights'])
    self.input_weights_df = input_weights_df
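# The transpose-and-rename dance above can be written more directly; a minimal
# equivalent sketch producing the same one-row frame (row label
# 'Initial weights', columns ABC/KKK):
import pandas as pd

weights_alt = pd.DataFrame([[0.6, 0.4]], columns=['ABC', 'KKK'],
                           index=['Initial weights'])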
def run_ESRNN():
    import torch
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    path_daily = r'C:\Users\xxxli\Desktop\Daily'
    dic_daily = preprocess.read_file(path_daily)
    series_list = []
    for k, v in dic_daily.items():
        ticker_name = k
        df, cat = v
        df = preprocess.single_price(df, ticker_name)  # column = [ticker]
        series_list.append(DataSeries(cat, 'daily', df))
    collect = DataCollection('universe daily', series_list)
    train_dc, test_dc = collect.split(numTest=24)
    m = ModelESRNN(max_epochs=15, batch_size=32, dilations=[[1, 3], [7, 14]],
                   input_size=12, output_size=24, device=device)
    m.train(train_dc)
    y_test = m.predict(test_dc)
    y_test_df = y_test.to_df()
    y_test_df.to_csv('hyper_ESRNN_1.csv')
def predict(self, numPredict: int, test_dc: DataCollection):
    # If the recommendation model has been trained, pass it to Telescope;
    # otherwise pass R's NULL so Telescope runs without one.
    if not self.rec_model:
        rec = robjects.r('NULL')
    else:
        rec = self.rec_model
    date = test_dc.to_df().index
    res = []
    for i, series in enumerate(self.data):
        rList = FloatVector(series)
        pred = pd.DataFrame(
            self.tel.telescope_forecast(rList, numPredict, rec_model=rec,
                                        natural=True, boxcox=True,
                                        doAnomDet=False, replace_zeros=True,
                                        use_indicators=True, plot=False)[0],
            columns=[self.tickers[i]], index=date)
        ds = DataSeries(self.categories[i], self.frequency, pred)
        res.append(ds)
    dc = DataCollection(self.label, res)
    return dc
def to_dc(self, df, pred_label, pred_freq):
    '''
    Reformat the forecast dataframe output from predict() into a
    DataCollection object.

    Args
    ----------
    pred_label: str
        used to label the DataCollection
    pred_freq: dict{ticker: str}
        used as the freq of each DataSeries
    '''
    ds_lst = []
    for k, v in df.groupby(['x', 'unique_id']):
        category, ticker = k
        ds_df = v[['ds', 'y_hat']]
        ds_df = ds_df.rename(columns={'ds': 'Date',
                                      'y_hat': ticker}).set_index('Date')
        ds_lst.append(DataSeries(category, pred_freq[ticker], ds_df))
    dc = DataCollection(pred_label, ds_lst)
    return dc
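# Usage sketch for to_dc(), assuming `m` is a model instance exposing it and
# that the forecast frame carries the 'x' (category), 'unique_id' (ticker),
# 'ds' (date) and 'y_hat' (prediction) columns grouped above. The data here
# is hypothetical:
import pandas as pd

fcst = pd.DataFrame({
    'x': ['ETF', 'ETF'],
    'unique_id': ['ABC', 'ABC'],
    'ds': pd.to_datetime(['2020-05-01', '2020-06-01']),
    'y_hat': [11.2, 11.9],
})
dc = m.to_dc(fcst, pred_label='forecast', pred_freq={'ABC': 'monthly'})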
def dc_generator(path: str, frequency: str):
    dic, recover_list, ticker_list = DataPreprocessing.read_file(path)
    series_list = []
    for k, v in dic.items():
        df, cat = v
        df = DataPreprocessing.single_price(df, k)
        series_list.append(DataSeries(cat, frequency, df))
    collect = DataCollection(frequency + ' Collection', series_list)
    return collect, recover_list, ticker_list
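# Usage sketch for dc_generator(), assuming a directory of price files that
# DataPreprocessing.read_file can parse (the path below is hypothetical):
import os

daily_dc, recover_list, ticker_list = dc_generator(
    os.path.join('test', 'Data', 'Daily'), 'daily')
print(daily_dc.ticker_list())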
def test_MP_class(self):
    import torch
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    path_monthly = os.path.join('test', 'Data', 'Monthly')
    dic_monthly = DP.read_file(path_monthly)
    n_assets = 1
    time_series_group = []
    for i in range(n_assets):
        df = dic_monthly[list(dic_monthly.keys())[i]]
        ds = DataSeries('ETF', 'monthly', df[0])
        time_series_group.append(ds)
    input_dc = DataCollection('test1', time_series_group)

    m = ModelESRNN(seasonality=[12], input_size=4, output_size=12,
                   device=device)
    train_dc, test_dc = input_dc.split(numTest=12)
    m.train(train_dc)
    forecast_dc = m.predict(test_dc)
    # train_dc.to_df().to_csv('insample.csv')
    test_dc.to_df().to_csv('test.csv')
    # forecast_dc.to_df().to_csv('forecast.csv')

    mn = MN.ModelNaive2(2, train_dc)
    naive2_dc = mn.fit_and_generate_prediction(12, 'MS')
    naive2_dc.to_df().to_csv('naive.csv')

    mp = MP.ModelPerformance("test model performance", 2, test_dc,
                             forecast_dc, train_dc, naive2_dc)
    mase = MP.MASE(test_dc.to_df(), forecast_dc.to_df(), train_dc.to_df(), 2)
    smape = MP.sMAPE(test_dc.to_df(), forecast_dc.to_df())
    mape = MP.MAPE(mp.y_df, mp.y_hat_df)
    r2 = MP.R2(test_dc.to_df(), forecast_dc.to_df())
    rmse = MP.RMSE(test_dc.to_df(), forecast_dc.to_df())
    owa = MP.OWA(test_dc.to_df(), forecast_dc.to_df(), train_dc.to_df(),
                 naive2_dc.to_df(), 2)
    u1 = MP.Theil_U1(test_dc.to_df(), forecast_dc.to_df())
    u2 = MP.Theil_U2(test_dc.to_df(), forecast_dc.to_df())

    mp.MASE()
    mp.sMAPE()
    mp.MAPE()
    mp.R2()
    mp.RMSE()
    mp.OWA()
    mp.Theil_U1()
    mp.Theil_U2()

    self.assertAlmostEqual(mp.metrics['sMAPE'], smape)
    self.assertAlmostEqual(mp.metrics['MAPE'], mape)
    self.assertAlmostEqual(mp.metrics['R2'], r2)
    self.assertAlmostEqual(mp.metrics['RMSE'], rmse)
    self.assertAlmostEqual(mp.metrics['MASE'], mase)
    self.assertAlmostEqual(mp.metrics['OWA'], owa)
    self.assertAlmostEqual(mp.metrics['Theil_U1'], u1)
    self.assertAlmostEqual(mp.metrics['Theil_U2'], u2)
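# For reference, the M4-competition definitions these metrics usually follow,
# over a horizon of h steps with seasonality m (m = 2 in the test above); the
# exact MP implementations may differ in detail:
#   sMAPE = (2 / h) * sum_t |y_t - yhat_t| / (|y_t| + |yhat_t|)
#   MASE  = mean_t |y_t - yhat_t| / mean_t |y_t - y_{t-m}|
#   OWA   = 0.5 * (sMAPE / sMAPE_naive2 + MASE / MASE_naive2)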
def recover_return(input_df: pd.DataFrame, recover_list, ticker_list):
    # Shift each return column back by its per-ticker recover constant:
    # input_df = input_df + recover - 1
    ds_list = []
    for column in input_df:
        idx = ticker_list.index(column)
        recover_num = recover_list[idx]
        temp_series = input_df[column] + recover_num - 1
        ds_list.append(DataSeries('ETF', 'daily', temp_series.to_frame()))
    output_dc = DataCollection('Daily Collection', ds_list)
    return output_dc
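# Usage sketch for recover_return() with hypothetical data: with a recover
# constant of 0.0, a stored value of 1.01 maps back to a 0.01 return
# (1.01 + 0.0 - 1).
import pandas as pd

fake_returns = pd.DataFrame([1.01, 0.99], columns=['FakeETF'],
                            index=pd.to_datetime(['2020-01-02',
                                                  '2020-01-03']))
recovered_dc = recover_return(fake_returns, recover_list=[0.0],
                              ticker_list=['FakeETF'])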
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # An example of how to use the Telescope model.
    path_monthly = os.path.join('test', 'Data', 'Monthly')
    dic_monthly = preprocess.read_file(path_monthly)
    series_list = []
    for k, v in dic_monthly.items():
        df, cat = v
        df = preprocess.single_price(df, k)
        series_list.append(DataSeries(cat, 'monthly', df))
    self.collect = DataCollection('test1', series_list)
def test_DataCollection_add(self):
    d = pd.DataFrame([11, 22], columns=['fakeZZZ'],
                     index=pd.to_datetime(['2019-1-12', '2019-02-05']))
    d_series = DataSeries('Bond', 'monthly', d)
    c_plus = self.c_collection.copy()
    c_plus.add(d_series)
    compare = [self.a_series, self.b_series, d_series]
    for i, item in enumerate(c_plus):
        assert (self.compareSeries(item, compare[i]))
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    path_monthly = os.path.join('test', 'Data', 'Monthly')
    dic_monthly = DataPreprocessing.read_file(path_monthly)
    n_assets = 4
    time_series_group = []
    for i in range(n_assets):
        df = dic_monthly[list(dic_monthly.keys())[i]]
        ds = DataSeries('ETF', 'monthly', df[0])
        time_series_group.append(ds)
    input_dc_test = DataCollection(label='Test Collection',
                                   time_series_group=time_series_group)
    self.input_dc = input_dc_test
    self.input_freq = input_dc_test.get_freq()
    self.input_df = self.input_dc.to_df().dropna()
    self.a = pd.DataFrame([10, 12, 32, 9, 11, 9], columns=['fakeSPY'],
                          index=pd.to_datetime([
                              '2020-01-01', '2020-02-01', '2020-03-01',
                              '2020-04-01', '2020-05-01', '2020-06-01'
                          ]))
    self.a_series = DataSeries('ETF', self.input_freq, self.a)
    self.b = pd.DataFrame([1, 1.2, 3.2, 0.9], columns=['fakeTreasury'],
                          index=pd.to_datetime([
                              '2019-12-01', '2020-02-01',
                              '2020-03-01', '2020-04-01'
                          ]))
    self.b_series = DataSeries('Bond', self.input_freq, self.b)
    self.c_collection = DataCollection('trial',
                                       [self.a_series, self.b_series])
    self.c_df = self.c_collection.to_df().interpolate(method='linear',
                                                      axis=0)
def test_Naive2(self):
    path_monthly = os.path.join('test', 'Data', 'Monthly')
    dic_monthly = preprocess.read_file(path_monthly)
    series_list = []
    for k, v in dic_monthly.items():
        df, cat = v
        df = preprocess.single_price(df, k)
        series_list.append(DataSeries(cat, 'monthly', df))
    collect = DataCollection('test1', series_list)
    train_dc, test_dc = collect.split(numTest=12)
    m = ModelNaive2(12, train_dc, test_dc)
    y_hat_Naive2_dc = m.fit_and_generate_prediction(12, freq='MS')
    y_hat_Naive2_dc.to_df().to_csv('test_Naive2_result.csv')
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.a = pd.DataFrame([10.2, 12, 32.1, 9.32], columns=['fakeSPY'],
                          index=pd.to_datetime([
                              '2020-01-01', '2020-02-01',
                              '2020-03-01', '2020-04-01'
                          ]))
    self.a_series = DataSeries('ETF', 'monthly', self.a)
    self.b = pd.DataFrame([2.3, 3.6, 4.5], columns=['fakeTreasury'],
                          index=pd.to_datetime([
                              '2019-12-12', '2020-02-05', '2020-09-13'
                          ]))
    self.b_series = DataSeries('Bond', 'monthly', self.b)
    self.c_collection = DataCollection('trial',
                                       [self.a_series, self.b_series])

    # For test_the_rest_of_entire_dataset():
    self.a_entire = pd.DataFrame([10.2, 12, 32.1, 9.32, 11.5, 9.7],
                                 columns=['fakeSPY'],
                                 index=pd.to_datetime([
                                     '2020-01-01', '2020-02-01',
                                     '2020-03-01', '2020-04-01',
                                     '2020-05-01', '2020-06-01'
                                 ]))
    self.a_series_entire = DataSeries('ETF', 'monthly', self.a_entire)
    self.b_entire = pd.DataFrame([2.3, 3.6, 4.5, 5.5],
                                 columns=['fakeTreasury'],
                                 index=pd.to_datetime([
                                     '2019-12-12', '2020-02-05',
                                     '2020-09-13', '2020-10-13'
                                 ]))
    self.b_series_entire = DataSeries('Bond', 'monthly', self.b_entire)
    self.c_collection_entire = DataCollection(
        'trial', [self.a_series_entire, self.b_series_entire])
    self.a_exp = pd.DataFrame([11.5, 9.7], columns=['fakeSPY'],
                              index=pd.to_datetime(
                                  ['2020-05-01', '2020-06-01']))
    self.a_series_exp = DataSeries('ETF', 'monthly', self.a_exp)
    self.b_exp = pd.DataFrame([5.5], columns=['fakeTreasury'],
                              index=pd.to_datetime(['2020-10-13']))
    self.b_series_exp = DataSeries('Bond', 'monthly', self.b_exp)
    self.c_collection_exp = DataCollection(
        'trial', [self.a_series_exp, self.b_series_exp])
def test_ESRNN(self):
    # An example of how to use ESRNN.
    import torch
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    path_daily = os.path.join('test', 'Data', 'daily')
    dic_daily = preprocess.read_file(path_daily)
    series_list = []
    for k, v in dic_daily.items():
        df, cat = v
        df = preprocess.single_price(df, k)
        series_list.append(DataSeries(cat, 'daily', df))
    collect = DataCollection('test1', series_list)
    m = ModelESRNN(max_epochs=5, seasonality=[], batch_size=64,
                   input_size=12, output_size=12, device=device)
    train_dc, test_dc = collect.split(numTest=12)
    m.train(train_dc)
    y_test = m.predict(test_dc)
    assert (isinstance(y_test, DataCollection))
    y_test_df = y_test.to_df()
    y_test_df.to_csv('predict_result.csv')
lr_scheduler_step_size = 9
lr_decay = 0.9
noise_std = 0.001
level_variability_penalty = 80
state_hsize = 40
dilation = [[1]]
add_nl_layer = False
seasonality = [5]

# action
path = os.path.join('test', 'Data', 'Daily')
dic = preprocess.read_file(path)
series_list = []
for k, v in dic.items():
    df, cat = v
    df = preprocess.single_price(df, k)
    series_list.append(DataSeries(cat, 'daily', df))
collect = DataCollection('RollingValidation', series_list)
input_dc, _ = collect.split(numTest=2 * numTest)
score, _ = validation_simple(
    input_dc,
    numTest=numTest,
    max_epochs=max_epochs,
    batch_size=batch_size,
    learning_rate=learning_rate,
    lr_scheduler_step_size=lr_scheduler_step_size,
    lr_decay=lr_decay,
    noise_std=noise_std,
    level_variability_penalty=level_variability_penalty,
    state_hsize=state_hsize,
    dilations=dilation,
    # the snippet was truncated here; the closing arguments below are
    # assumed from the hyperparameters defined above
    add_nl_layer=add_nl_layer,
    seasonality=seasonality)
class Test_Data(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.a = pd.DataFrame([10.2, 12, 32.1, 9.32], columns=['fakeSPY'],
                              index=pd.to_datetime([
                                  '2020-01-01', '2020-02-01',
                                  '2020-03-01', '2020-04-01'
                              ]))
        self.a_series = DataSeries('ETF', 'monthly', self.a)
        self.b = pd.DataFrame([2.3, 3.6, 4.5], columns=['fakeTreasury'],
                              index=pd.to_datetime([
                                  '2019-12-12', '2020-02-05', '2020-09-13'
                              ]))
        self.b_series = DataSeries('Bond', 'monthly', self.b)
        self.c_collection = DataCollection('trial',
                                           [self.a_series, self.b_series])

        # For test_the_rest_of_entire_dataset():
        self.a_entire = pd.DataFrame([10.2, 12, 32.1, 9.32, 11.5, 9.7],
                                     columns=['fakeSPY'],
                                     index=pd.to_datetime([
                                         '2020-01-01', '2020-02-01',
                                         '2020-03-01', '2020-04-01',
                                         '2020-05-01', '2020-06-01'
                                     ]))
        self.a_series_entire = DataSeries('ETF', 'monthly', self.a_entire)
        self.b_entire = pd.DataFrame([2.3, 3.6, 4.5, 5.5],
                                     columns=['fakeTreasury'],
                                     index=pd.to_datetime([
                                         '2019-12-12', '2020-02-05',
                                         '2020-09-13', '2020-10-13'
                                     ]))
        self.b_series_entire = DataSeries('Bond', 'monthly', self.b_entire)
        self.c_collection_entire = DataCollection(
            'trial', [self.a_series_entire, self.b_series_entire])
        self.a_exp = pd.DataFrame([11.5, 9.7], columns=['fakeSPY'],
                                  index=pd.to_datetime(
                                      ['2020-05-01', '2020-06-01']))
        self.a_series_exp = DataSeries('ETF', 'monthly', self.a_exp)
        self.b_exp = pd.DataFrame([5.5], columns=['fakeTreasury'],
                                  index=pd.to_datetime(['2020-10-13']))
        self.b_series_exp = DataSeries('Bond', 'monthly', self.b_exp)
        self.c_collection_exp = DataCollection(
            'trial', [self.a_series_exp, self.b_series_exp])

    def test_DataSeries_basic(self):
        a = self.a
        a_series = self.a_series
        assert (len(a_series) == 4)
        assert (str(a_series) == 'monthly fakeSPY')
        assert (a_series.get_ticker() == 'fakeSPY')
        assert (a_series.get_category() == 'ETF')
        assert (a_series.get_freq() == 'monthly')
        assert (a.equals(a_series.get_ts()))
        # test deep copy
        a_copy = a_series.copy()
        assert (a_copy != a_series
                and a_copy.get_ts().equals(a_series.get_ts()))
        assert (isinstance(a_series.to_Series(), pd.Series))

    def test_DataSeries_add_sub(self):
        diff = self.a_series_entire - self.a_series
        assert (self.compareSeries(diff, self.a_series_exp))
        a_plus = diff + self.a_series
        assert (self.compareSeries(a_plus, self.a_series_entire))

    def test_DataSeries_to_list(self):
        lst = self.a_series.to_list()
        assert (lst == [10.2, 12, 32.1, 9.32])

    def test_last_index(self):
        assert (self.a_series.get_last_date() ==
                pd.to_datetime('2020-04-01'))

    def test_DataSeries_split_and_trim(self):
        # test split
        a_train, a_test = self.a_series.split(pct=0.75)
        assert (isinstance(a_train, DataSeries))
        assert (isinstance(a_test, DataSeries))
        assert (len(a_train) == 3)
        assert (len(a_test) == 1)
        assert (self.a.iloc[:3].equals(a_train.get_ts()))
        assert (self.a.iloc[3:].equals(a_test.get_ts()))
        # test trim
        trimed = self.a_series.trim('2020-02-01', '2020-03-01')
        assert (len(trimed) == 2)
        assert (self.a.loc['2020-02-01':'2020-03-01'].equals(
            trimed.get_ts()))

    @staticmethod
    def compareSeries(a, b):
        flag = True
        if not isinstance(a, DataSeries):
            print("\n The first item is not a DataSeries object")
            return False
        if not isinstance(b, DataSeries):
            print("\n The second item is not a DataSeries object")
            return False
        if a == b:
            print("\n The two items are the same object")
            flag = False
        if len(a) != len(b):
            print("\n The two items do not have the same length")
            flag = False
        if str(a) != str(b):
            print("\n The two items do not have the same ticker")
            flag = False
        if a.get_category() != b.get_category():
            print("\n The two items do not have the same category")
            flag = False
        if not a.get_ts().equals(b.get_ts()):
            print("\n The two items do not have the same time series")
            flag = False
        if not a.get_freq() == b.get_freq():
            print("\n The two items do not have the same frequency")
            flag = False
        return flag

    def test_DataCollection_basic(self):
        assert (len(self.c_collection) == 2)
        assert (self.c_collection.get_freq() == 'monthly')
        for item, compare in zip(self.c_collection,
                                 [self.a_series, self.b_series]):
            assert (self.compareSeries(item, compare))

    def test_DataCollection_add_sub(self):
        res = self.c_collection_entire - self.c_collection
        expected = self.c_collection_exp
        for r, e in zip(res, expected):
            assert (self.compareSeries(r, e))
        res_plus = res + self.c_collection
        for r, e in zip(res_plus, self.c_collection_entire):
            assert (self.compareSeries(r, e))

    def test_DataCollection_get_series(self):
        item1 = self.c_collection[1]
        assert (self.compareSeries(item1, self.b_series))
        item2 = self.c_collection.get_series('fakeSPY')
        assert (self.compareSeries(item2, self.a_series))

    def test_DataCollection_copy(self):
        c = self.c_collection.copy()
        assert (c != self.c_collection)
        assert (c.label == self.c_collection.label)
        assert (c.get_freq() == self.c_collection.get_freq())
        for one, two in zip(c, self.c_collection):
            assert (self.compareSeries(one, two))

    def test_DataCollection_summary(self):
        pass

    def test_DataCollection_split(self):
        train, test = self.c_collection.split(pct=0.75)
        assert (str(train) == 'trial')
        assert (train.freq == 'monthly')
        assert (str(test) == 'trial')
        assert (test.freq == 'monthly')
        compare = [self.a_series.split(0.75), self.b_series.split(0.75)]
        compare_train, compare_test = zip(*compare)
        train_col, test_col = list(compare_train), list(compare_test)
        for i, item in enumerate(train):
            assert (self.compareSeries(item, train_col[i]))
        for i, item in enumerate(test):
            assert (self.compareSeries(item, test_col[i]))

    def test_DataCollection_list(self):
        assert (self.c_collection.ticker_list() ==
                ['fakeSPY', 'fakeTreasury'])
        assert (self.c_collection.category_list() == ['ETF', 'Bond'])
        assert (self.c_collection.last_date_list() == pd.to_datetime(
            ['2020-04-01', '2020-09-13']).to_list())
        assert (self.c_collection.to_list() == [[10.2, 12, 32.1, 9.32],
                                                [2.3, 3.6, 4.5]])

    def test_DataCollection_add(self):
        d = pd.DataFrame([11, 22], columns=['fakeZZZ'],
                         index=pd.to_datetime(['2019-1-12', '2019-02-05']))
        d_series = DataSeries('Bond', 'monthly', d)
        c_plus = self.c_collection.copy()
        c_plus.add(d_series)
        compare = [self.a_series, self.b_series, d_series]
        for i, item in enumerate(c_plus):
            assert (self.compareSeries(item, compare[i]))

    def test_DataCollection_df(self):
        df = self.c_collection.to_df()
        compare = pd.concat([self.a, self.b], axis=1)
        assert (df.equals(compare))

    def test_price_to_return(self):
        pass