def create_noop_environment():
    oneday = timedelta(days=1)
    start = datetime(2006, 1, 1, tzinfo=pytz.utc)
    bm_returns = []
    tr_curves = OrderedDict()
    for day in date_gen(start=start, delta=oneday, count=252):
        dr = DailyReturn(day, 0.01)
        bm_returns.append(dr)
        curve = {
            '10year': 0.0799,
            '1month': 0.0799,
            '1year': 0.0785,
            '20year': 0.0765,
            '2year': 0.0794,
            '30year': 0.0804,
            '3month': 0.0789,
            '3year': 0.0796,
            '5year': 0.0792,
            '6month': 0.0794,
            '7year': 0.0804,
            'tid': 1752
        }
        tr_curves[day] = curve

    load_nodata = lambda x: (bm_returns, tr_curves)

    return trading.TradingEnvironment(load=load_nodata)
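A minimal usage sketch, assuming the same module context as the helpers above (with the trading module and create_noop_environment in scope): the synthetic environment can be installed as the module-level trading.environment that the other helpers and tests in this section rely on.

# Sketch only: make the synthetic environment the module-level environment
# used by create_returns_from_list / test_risk_metrics_returns below.
trading.environment = create_noop_environment()

# Every generated day carries the constant 0.01 benchmark return and the
# flat treasury curve defined in create_noop_environment().
for dr in trading.environment.benchmark_returns:
    print(dr.date, dr.returns)
    break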
def get_benchmark_returns(symbol, start_date=None, end_date=None):
    """
    Returns a list of return percentages in chronological order.
    """
    if start_date is None:
        start_date = datetime(year=1950, month=1, day=3)
    if end_date is None:
        end_date = datetime.utcnow()

    # Get the benchmark data and convert it to a list in chronological order.
    data_points = list(get_benchmark_data(symbol, start_date, end_date))
    data_points.reverse()

    # Calculate the return percentages.
    benchmark_returns = []
    for i, data_point in enumerate(data_points):
        if i == 0:
            curr_open = data_points[i]['open']
            returns = (data_points[i]['close'] - curr_open) / curr_open
        else:
            prev_close = data_points[i - 1]['close']
            returns = (data_point['close'] - prev_close) / prev_close
        daily_return = DailyReturn(date=data_point['date'], returns=returns)
        benchmark_returns.append(daily_return)

    return benchmark_returns
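To make the return arithmetic above concrete, here is a worked sketch with fabricated prices (the 'open'/'close'/'date' keys mirror the shape consumed above; real data points carry datetimes, not strings): day 0 uses its own open-to-close move, and every later day is measured against the previous close.

# Illustration only -- made-up prices, not real benchmark data.
data_points = [
    {'date': '2006-01-03', 'open': 100.0, 'close': 102.0},
    {'date': '2006-01-04', 'open': 102.5, 'close': 101.0},
]

# Day 0: (close - open) / open             = (102 - 100) / 100  = 0.02
# Day 1: (close - prev_close) / prev_close = (101 - 102) / 102 ~= -0.0098
day0 = (data_points[0]['close'] - data_points[0]['open']) / data_points[0]['open']
day1 = (data_points[1]['close'] - data_points[0]['close']) / data_points[0]['close']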
def create_returns_from_range(sim_params):
    current = sim_params.first_open
    end = sim_params.last_close
    test_range = []
    while current <= end:
        r = DailyReturn(current, random.random())
        test_range.append(r)
        current = trading.environment.next_trading_day(current)
    return test_range
def create_returns_from_range(sim_params):
    current = sim_params.first_open
    end = sim_params.last_close
    one_day = timedelta(days=1)
    test_range = []
    while current <= end:
        r = DailyReturn(current, random.random())
        test_range.append(r)
        current = get_next_trading_dt(current, one_day)
    return test_range
def create_returns_from_list(returns, sim_params):
    current = sim_params.first_open
    test_range = []

    # Sometimes the range starts with a non-trading day.
    if not trading.environment.is_trading_day(current):
        current = trading.environment.next_trading_day(current)

    for return_val in returns:
        r = DailyReturn(current, return_val)
        test_range.append(r)
        current = trading.environment.next_trading_day(current)

    return test_range
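A hedged usage sketch for create_returns_from_list: the dates are arbitrary, SimulationParameters is constructed with the same two positional arguments used in test_risk_metrics_returns further down, and trading.environment is assumed to be set up already (for example via create_noop_environment above).

# Sketch only: sample_returns and the date range are illustrative values.
sim_params = SimulationParameters(datetime(2006, 1, 3, tzinfo=pytz.utc),
                                  datetime(2006, 1, 10, tzinfo=pytz.utc))
sample_returns = [0.01, -0.02, 0.005]
daily = create_returns_from_list(sample_returns, sim_params)
# One DailyReturn per input value, each pinned to the next trading day
# starting from sim_params.first_open.
assert len(daily) == len(sample_returns)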
def get_benchmark_returns(symbol, start_date=None, end_date=None):
    if start_date is None:
        start_date = datetime(year=1950, month=1, day=3)
    if end_date is None:
        end_date = datetime.utcnow()

    benchmark_returns = []
    for data_point in get_benchmark_data(symbol, start_date, end_date):
        returns = (data_point['close'] - data_point['open']) / \
            data_point['open']
        daily_return = DailyReturn(date=data_point['date'], returns=returns)
        benchmark_returns.append(daily_return)

    return benchmark_returns
def create_returns(daycount, sim_params):
    """
    For the given number of calendar (not trading) days, return all the
    trading days between start and start + daycount.
    """
    test_range = []
    current = sim_params.first_open
    one_day = timedelta(days=1)
    for day in range(daycount):
        current = current + one_day
        if trading.environment.is_trading_day(current):
            r = DailyReturn(current, random.random())
            test_range.append(r)
    return test_range
def load_market_data(self, bm_symbol='^GSPC'):
    #TODO Parametric
    #event_dt = datetime.today().replace(tzinfo=pytz.utc)
    event_dt = datetime.now(pytz.utc)

    # Get today's benchmark return
    #NOTE Crude for now; previous days could later be used to compute indicators
    #last_bench_return = get_benchmark_returns(
    #    bm_symbol, start_date=(event_dt - pd.datetools.Day(self.loopback)))
    #last_bench_return = last_bench_return[-1]
    #print('Benchmark on {}: {}'.format(
    #    last_bench_return.date, last_bench_return.returns))

    for exchange, infos in datautils.Exchange.iteritems():
        if infos['index'] == bm_symbol:
            code = datautils.Exchange[exchange]['code']
            break

    bm_returns = []
    while event_dt < self.last_trading_day:
        #TODO Current value to give
        #TODO Append only if trading day and market hour
        #bm_returns.append(DailyReturn(date=event_dt.replace(microsecond=0),
        #                              returns=last_bench_return.returns))
        bm_returns.append(DailyReturn(date=event_dt.replace(microsecond=0),
                                      returns=code))
        #TODO Frequency control
        event_dt += self.offset

    bm_returns = sorted(bm_returns, key=attrgetter('date'))

    # Exhaust the treasury data generator, keeping only the latest curve.
    tr_gen = get_treasury_data()
    while True:
        try:
            last_tr = tr_gen.next()
        except StopIteration:
            break

    tr_curves = {}
    #tr_dt = datetime.today().replace(tzinfo=pytz.utc)
    tr_dt = datetime.now(pytz.utc)
    while tr_dt < self.last_trading_day:
        #tr_dt = tr_dt.replace(hour=0, minute=0, second=0, tzinfo=pytz.utc)
        tr_curves[tr_dt] = last_tr
        tr_dt += self.offset

    tr_curves = OrderedDict(sorted(
        ((dt, c) for dt, c in tr_curves.iteritems()),
        key=lambda t: t[0]))

    return bm_returns, tr_curves
def test_risk_metrics_returns(self):
    trading.environment = trading.TradingEnvironment()
    # Advance start date to first date in the trading calendar
    if trading.environment.is_trading_day(self.start_date):
        start_date = self.start_date
    else:
        start_date = trading.environment.next_trading_day(self.start_date)

    self.all_benchmark_returns = pd.Series({
        x.date: x.returns
        for x in trading.environment.benchmark_returns
        if x.date >= self.start_date
    })

    start_index = trading.environment.trading_days.searchsorted(start_date)
    end_date = trading.environment.trading_days[start_index + len(RETURNS)]

    sim_params = SimulationParameters(start_date, end_date)

    risk_metrics_refactor = risk.RiskMetricsIterative(sim_params)
    todays_date = start_date

    cur_returns = []
    for i, ret in enumerate(RETURNS):
        todays_return_obj = DailyReturn(todays_date, ret)
        cur_returns.append(todays_return_obj)

        try:
            risk_metrics_original = risk.RiskMetricsBatch(
                start_date=start_date,
                end_date=todays_date,
                returns=cur_returns)
        except Exception as e:
            # assert that when original raises exception, same
            # exception is raised by risk_metrics_refactor
            np.testing.assert_raises(
                type(e),
                risk_metrics_refactor.update,
                todays_date,
                self.all_benchmark_returns[todays_return_obj.date])
            continue

        risk_metrics_refactor.update(
            todays_date,
            ret,
            self.all_benchmark_returns[todays_return_obj.date])

        # Move forward day counter to next trading day
        todays_date = trading.environment.next_trading_day(todays_date)

        self.assertEqual(risk_metrics_original.start_date,
                         risk_metrics_refactor.start_date)
        self.assertEqual(risk_metrics_original.end_date,
                         risk_metrics_refactor.algorithm_returns.index[-1])
        self.assertEqual(risk_metrics_original.treasury_period_return,
                         risk_metrics_refactor.treasury_period_return)
        np.testing.assert_allclose(risk_metrics_original.benchmark_returns,
                                   risk_metrics_refactor.benchmark_returns,
                                   rtol=0.001)
        np.testing.assert_allclose(risk_metrics_original.algorithm_returns,
                                   risk_metrics_refactor.algorithm_returns,
                                   rtol=0.001)

        risk_original_dict = risk_metrics_original.to_dict()
        risk_refactor_dict = risk_metrics_refactor.to_dict()
        self.assertEqual(set(risk_original_dict.keys()),
                         set(risk_refactor_dict.keys()))

        err_msg_format = """\
"In update step {iter}: {measure} should be {truth} but is {returned}!"""

        for measure in risk_original_dict.iterkeys():
            if measure == 'max_drawdown':
                np.testing.assert_almost_equal(
                    risk_refactor_dict[measure],
                    risk_original_dict[measure],
                    err_msg=err_msg_format.format(
                        iter=i,
                        measure=measure,
                        truth=risk_original_dict[measure],
                        returned=risk_refactor_dict[measure]))
            else:
                if isinstance(risk_original_dict[measure], numbers.Real):
                    np.testing.assert_allclose(
                        risk_original_dict[measure],
                        risk_refactor_dict[measure],
                        rtol=0.001,
                        err_msg=err_msg_format.format(
                            iter=i,
                            measure=measure,
                            truth=risk_original_dict[measure],
                            returned=risk_refactor_dict[measure]))
                else:
                    np.testing.assert_equal(
                        risk_original_dict[measure],
                        risk_refactor_dict[measure],
                        err_msg=err_msg_format.format(
                            iter=i,
                            measure=measure,
                            truth=risk_original_dict[measure],
                            returned=risk_refactor_dict[measure]))
def load_market_data(bm_symbol='^GSPC'):
    try:
        fp_bm = get_datafile(get_benchmark_filename(bm_symbol), "rb")
    except IOError:
        print("""
data msgpacks aren't distributed with source.
Fetching data from Yahoo Finance.
""").strip()
        dump_benchmarks(bm_symbol)
        fp_bm = get_datafile(get_benchmark_filename(bm_symbol), "rb")

    bm_list = msgpack.loads(fp_bm.read())

    # Find the offset of the last date for which we have trading data in our
    # list of valid trading days
    last_bm_date = tuple_to_date(bm_list[-1][0])
    last_bm_date_offset = trading_days.searchsorted(
        last_bm_date.strftime('%Y/%m/%d'))

    # If more than one trading day has elapsed since the last day for which
    # we have data, then we need to update
    if len(trading_days) - last_bm_date_offset > 1:
        update_benchmarks(bm_symbol, last_bm_date)
        fp_bm = get_datafile(get_benchmark_filename(bm_symbol), "rb")
        bm_list = msgpack.loads(fp_bm.read())

    bm_returns = []
    for packed_date, returns in bm_list:
        event_dt = tuple_to_date(packed_date)
        daily_return = DailyReturn(date=event_dt, returns=returns)
        bm_returns.append(daily_return)

    fp_bm.close()

    bm_returns = sorted(bm_returns, key=attrgetter('date'))

    try:
        fp_tr = get_datafile('treasury_curves.msgpack', "rb")
    except IOError:
        print("""
data msgpacks aren't distributed with source.
Fetching data from data.treasury.gov
""").strip()
        dump_treasury_curves()
        fp_tr = get_datafile('treasury_curves.msgpack', "rb")

    tr_list = msgpack.loads(fp_tr.read())

    # Find the offset of the last date for which we have trading data in our
    # list of valid trading days
    last_tr_date = tuple_to_date(tr_list[-1][0])
    last_tr_date_offset = trading_days.searchsorted(
        last_tr_date.strftime('%Y/%m/%d'))

    # If more than one trading day has elapsed since the last day for which
    # we have data, then we need to update
    if len(trading_days) - last_tr_date_offset > 1:
        update_treasury_curves(last_tr_date)
        fp_tr = get_datafile('treasury_curves.msgpack', "rb")
        tr_list = msgpack.loads(fp_tr.read())

    tr_curves = {}
    for packed_date, curve in tr_list:
        tr_dt = tuple_to_date(packed_date)
        # tr_dt = tr_dt.replace(hour=0, minute=0, second=0, microsecond=0,
        #                       tzinfo=pytz.utc)
        tr_curves[tr_dt] = curve

    fp_tr.close()

    tr_curves = OrderedDict(sorted(
        ((dt, c) for dt, c in tr_curves.iteritems()),
        key=lambda t: t[0]))

    return bm_returns, tr_curves
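Both loaders above return the same (bm_returns, tr_curves) pair that the no-op lambda in create_noop_environment fabricates, so plugging one into TradingEnvironment by the same load= hook is a plausible sketch rather than a confirmed signature:

# Sketch only: mirrors load=load_nodata in create_noop_environment above.
# Whether TradingEnvironment passes an argument to the callable is assumed
# from the one-argument lambda used there.
env = trading.TradingEnvironment(load=load_market_data)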
def test_risk_metrics_returns(self):
    risk_metrics_refactor = risk.RiskMetricsIterative(self.start_date)
    todays_date = self.start_date

    cur_returns = []
    for i, ret in enumerate(RETURNS):
        todays_return_obj = DailyReturn(todays_date, ret)
        cur_returns.append(todays_return_obj)

        # Move forward day counter to next trading day
        todays_date += self.oneday
        while not trading.environment.is_trading_day(todays_date):
            todays_date += self.oneday

        try:
            risk_metrics_original = risk.RiskMetricsBatch(
                start_date=self.start_date,
                end_date=todays_date,
                returns=cur_returns)
        except Exception as e:
            # assert that when original raises exception, same
            # exception is raised by risk_metrics_refactor
            np.testing.assert_raises(type(e),
                                     risk_metrics_refactor.update,
                                     todays_date,
                                     ret)
            continue

        risk_metrics_refactor.update(todays_date, ret)

        self.assertEqual(risk_metrics_original.start_date,
                         risk_metrics_refactor.start_date)
        self.assertEqual(risk_metrics_original.end_date,
                         risk_metrics_refactor.end_date)
        self.assertEqual(risk_metrics_original.treasury_duration,
                         risk_metrics_refactor.treasury_duration)
        self.assertEqual(risk_metrics_original.treasury_curve,
                         risk_metrics_refactor.treasury_curve)
        self.assertEqual(risk_metrics_original.treasury_period_return,
                         risk_metrics_refactor.treasury_period_return)
        self.assertEqual(risk_metrics_original.benchmark_returns,
                         risk_metrics_refactor.benchmark_returns)
        self.assertEqual(risk_metrics_original.algorithm_returns,
                         risk_metrics_refactor.algorithm_returns)

        risk_original_dict = risk_metrics_original.to_dict()
        risk_refactor_dict = risk_metrics_refactor.to_dict()
        self.assertEqual(set(risk_original_dict.keys()),
                         set(risk_refactor_dict.keys()))

        err_msg_format = """\
"In update step {iter}: {measure} should be {truth} but is {returned}!"""

        for measure in risk_original_dict.iterkeys():
            if measure == 'max_drawdown':
                np.testing.assert_almost_equal(
                    risk_refactor_dict[measure],
                    risk_original_dict[measure],
                    err_msg=err_msg_format.format(
                        iter=i,
                        measure=measure,
                        truth=risk_original_dict[measure],
                        returned=risk_refactor_dict[measure]))
            else:
                np.testing.assert_equal(
                    risk_original_dict[measure],
                    risk_refactor_dict[measure],
                    err_msg_format.format(
                        iter=i,
                        measure=measure,
                        truth=risk_original_dict[measure],
                        returned=risk_refactor_dict[measure]))