def test_history_daily_data_1m_window(self):
    """
    An algorithm run with daily data whose initialize() registers a
    minute-frequency ('1m') history window must raise
    IncompatibleHistoryFrequency.
    """
    algo_text = """
from zipline.api import history, add_history

def initialize(context):
    add_history(bar_count=1, frequency='1m', field='price')

def handle_data(context, data):
    prices = history(bar_count=3, frequency='1d', field='price')
""".strip()

    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-30', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end)

    # The '1m' history spec clashes with data_frequency='daily'; the
    # error may surface during construction or the run, so both are
    # inside the assertRaises block.
    with self.assertRaises(IncompatibleHistoryFrequency):
        algo = TradingAlgorithm(
            script=algo_text,
            data_frequency='daily',
            sim_params=sim_params
        )
        source = RandomWalkSource(start=start, end=end)
        algo.run(source)
def test_history_container_constructed_at_runtime(self):
    """
    The HistoryContainer must not exist before run() and must be built
    lazily during the run, sized to serve the '1d' spec requested by
    handle_data.
    """
    algo_text = dedent(
        """\
        from zipline.api import history
        def handle_data(context, data):
            context.prices = history(2, '1d', 'price')
        """
    )
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='daily'
    )

    algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params
    )

    # Nothing has requested history yet, so no container exists.
    self.assertIsNone(algo.history_container)
    algo.run(RandomWalkSource(start=start, end=end))

    container = algo.history_container
    self.assertIsNotNone(
        container,
        msg='HistoryContainer was not constructed at runtime',
    )

    # A '1d' spec on minute data needs a full trading day of minutes.
    self.assertEqual(
        container.buffer_panel.window_length,
        Frequency.MAX_MINUTES['d'],
        msg='HistoryContainer.buffer_panel was not large enough to service'
        ' the given HistorySpec',
    )
    self.assertEqual(
        len(container.digest_panels),
        1,
        msg='The HistoryContainer created too many digest panels',
    )

    freq, digest = next(iter(container.digest_panels.items()))
    self.assertEqual(
        freq.unit_str,
        'd',
    )
    self.assertEqual(
        digest.window_length,
        1,
        msg='The digest panel is not large enough to service the given'
        ' HistorySpec',
    )
def test_history_container_constructed_at_runtime(self):
    """
    The HistoryContainer must be created lazily during run(), not at
    TradingAlgorithm construction, and must be sized to serve the '1d'
    HistorySpec requested by handle_data.
    """
    algo_text = dedent("""\
        from zipline.api import history
        def handle_data(context, data):
            context.prices = history(2, '1d', 'price')
        """)
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(period_start=start,
                                      period_end=end,
                                      capital_base=float("1.0e5"),
                                      data_frequency='minute',
                                      emission_rate='daily')

    test_algo = TradingAlgorithm(script=algo_text,
                                 data_frequency='minute',
                                 sim_params=sim_params)

    source = RandomWalkSource(start=start, end=end)

    # No container before the first run.
    self.assertIsNone(test_algo.history_container)
    test_algo.run(source)
    self.assertIsNotNone(
        test_algo.history_container,
        msg='HistoryContainer was not constructed at runtime',
    )

    container = test_algo.history_container

    # Serving a '1d' request from minute data needs a full trading day
    # of minute bars in the buffer panel.
    self.assertEqual(
        container.buffer_panel.window_length,
        Frequency.MAX_MINUTES['d'],
        msg='HistoryContainer.buffer_panel was not large enough to service'
        ' the given HistorySpec',
    )
    self.assertEqual(
        len(container.digest_panels),
        1,
        msg='The HistoryContainer created too many digest panels',
    )

    freq, digest = list(container.digest_panels.items())[0]
    self.assertEqual(
        freq.unit_str,
        'd',
    )
    self.assertEqual(
        digest.window_length,
        1,
        msg='The digest panel is not large enough to service the given'
        ' HistorySpec',
    )
def test_history_passed_to_talib(self):
    """
    Had an issue where MagicMock was causing errors during validation
    with talib.

    We don't officially support a talib integration, yet.
    But using talib directly should work.
    """
    algo_text = """
import talib
import numpy as np

from zipline.api import history, add_history, record

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')

    ma_result = talib.MA(np.asarray(prices[0]), timeperiod=2)
    record(ma=ma_result[-1])
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30

    # Eddie: this was set to 04-10 but I don't see how that makes
    # sense as it does not generate enough data to get at -2 index
    # below.
    start = pd.Timestamp("2007-04-05", tz="UTC")
    end = pd.Timestamp("2007-04-10", tz="UTC")

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency="minute",
        emission_rate="daily",
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency="minute",
        sim_params=sim_params,
        env=TestHistoryAlgo.env
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    # At this point, just ensure that there is no crash.
    self.assertIsNotNone(output)

    recorded_ma = output.ix[-2, "ma"]

    self.assertFalse(pd.isnull(recorded_ma))
    # Depends on seed
    np.testing.assert_almost_equal(recorded_ma, 159.76304468946876)
def get_results(self, algo_code):
    """Build a TradingAlgorithm from *algo_code* and run it against the
    fixture data portal, returning the performance output."""
    return TradingAlgorithm(
        script=algo_code,
        env=self.env,
        sim_params=self.sim_params,
    ).run(self.data_portal)
def test_history_grow_length_intra_bar(self, incr):
    """
    Tests growing the length of a digest panel with different
    date_buf deltas in a single bar.
    """
    algo_text = dedent(
        """\
        from zipline.api import history


        def initialize(context):
            context.bar_count = 1


        def handle_data(context, data):
            prices = history(context.bar_count, '1d', 'price')
            context.test_case.assertEqual(len(prices), context.bar_count)
            context.bar_count += {incr}
            prices = history(context.bar_count, '1d', 'price')
            context.test_case.assertEqual(len(prices), context.bar_count)
        """
    ).format(incr=incr)
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='daily',
        env=self.env,
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=self.env,
    )
    # Exposed so the embedded algo can make assertions through
    # context.test_case inside handle_data.
    test_algo.test_case = self

    source = RandomWalkSource(start=start, end=end)

    self.assertIsNone(test_algo.history_container)
    test_algo.run(source)
def test_history_passed_to_talib(self):
    """
    Had an issue where MagicMock was causing errors during validation
    with talib.

    We don't officially support a talib integration, yet.
    But using talib directly should work.
    """
    algo_text = """
import talib
import numpy as np

from zipline.api import history, add_history, record

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')

    ma_result = talib.MA(np.asarray(prices[0]), timeperiod=2)
    record(ma=ma_result[-1])
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30

    # Eddie: this was set to 04-10 but I don't see how that makes
    # sense as it does not generate enough data to get at -2 index
    # below.
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(period_start=start,
                                      period_end=end,
                                      capital_base=float("1.0e5"),
                                      data_frequency='minute',
                                      emission_rate='daily')

    test_algo = TradingAlgorithm(script=algo_text,
                                 data_frequency='minute',
                                 sim_params=sim_params)

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    # At this point, just ensure that there is no crash.
    self.assertIsNotNone(output)

    recorded_ma = output.ix[-2, 'ma']

    self.assertFalse(pd.isnull(recorded_ma))
    # Depends on seed
    np.testing.assert_almost_equal(recorded_ma, 159.76304468946876)
def run_algo(self, code, sim_params=None, data_frequency="daily"):
    """Run *code* as a TradingAlgorithm against a FetcherDataPortal.

    Falls back to the fixture's sim_params when none is given and
    returns the performance results frame.
    """
    params = self.sim_params if sim_params is None else sim_params
    algo = TradingAlgorithm(
        script=code,
        sim_params=params,
        env=self.env,
        data_frequency=data_frequency,
    )
    return algo.run(FetcherDataPortal(self.env))
def test_basic_history_positional_args(self):
    """
    Ensure that positional args work.
    """
    algo_text = """
import copy
from zipline.api import history, add_history

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')
    context.last_prices = copy.deepcopy(prices)
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-21', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    last_prices = test_algo.last_prices[0]
    oldest_dt = pd.Timestamp(
        '2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    newest_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')

    # FIX: assertEquals is a deprecated alias of assertEqual; use the
    # canonical name.
    self.assertEqual(oldest_dt, last_prices.index[0])
    self.assertEqual(newest_dt, last_prices.index[-1])

    # Exact values depend on the RandomWalkSource seed.
    self.assertEqual(139.36946942498648, last_prices[oldest_dt])
    self.assertEqual(180.15661995395106, last_prices[newest_dt])
def test_basic_history_one_day(self):
    """
    A 1-bar daily history window should contain exactly one row, so
    its oldest and newest timestamps coincide.
    """
    algo_text = """
from zipline.api import history, add_history

def initialize(context):
    add_history(bar_count=1, frequency='1d', field='price')

def handle_data(context, data):
    prices = history(bar_count=1, frequency='1d', field='price')
    context.last_prices = prices
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-21', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)

    self.assertIsNotNone(output)

    last_prices = test_algo.last_prices[0]
    # oldest and newest should be the same if there is only 1 bar
    oldest_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    newest_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')

    # FIX: assertEquals is a deprecated alias of assertEqual; use the
    # canonical name.
    self.assertEqual(oldest_dt, last_prices.index[0])
    self.assertEqual(newest_dt, last_prices.index[-1])

    # Random, depends on seed
    self.assertEqual(180.15661995395106, last_prices[oldest_dt])
    self.assertEqual(180.15661995395106, last_prices[newest_dt])
def test_current_contract_in_algo(self):
    """
    data.current(cf, 'contract') for primary/secondary continuous
    futures should follow the calendar roll: FOF16/FOG16 on the first
    session, then FOG16/FOH16 from the second session onward.
    """
    code = dedent("""
from zipline.api import (
    record,
    continuous_future,
    schedule_function,
    get_datetime,
)

def initialize(algo):
    algo.primary_cl = continuous_future('FO', 0, 'calendar')
    algo.secondary_cl = continuous_future('FO', 1, 'calendar')
    schedule_function(record_current_contract)

def record_current_contract(algo, data):
    record(datetime=get_datetime())
    record(primary=data.current(algo.primary_cl, 'contract'))
    record(secondary=data.current(algo.secondary_cl, 'contract'))
""")
    algo = TradingAlgorithm(script=code,
                            sim_params=self.sim_params,
                            trading_calendar=self.trading_calendar,
                            env=self.env)
    results = algo.run(self.data_portal)
    result = results.iloc[0]
    self.assertEqual(result.primary.symbol,
                     'FOF16',
                     'Primary should be FOF16 on first session.')
    self.assertEqual(result.secondary.symbol,
                     'FOG16',
                     'Secondary should be FOG16 on first session.')

    result = results.iloc[1]
    # Second day, primary should switch to FOG
    self.assertEqual(result.primary.symbol,
                     'FOG16',
                     'Primary should be FOG16 on second session, auto '
                     'close is at beginning of the session.')
    self.assertEqual(result.secondary.symbol,
                     'FOH16',
                     'Secondary should be FOH16 on second session, auto '
                     'close is at beginning of the session.')

    result = results.iloc[2]
    # Second day, primary should switch to FOG
    self.assertEqual(result.primary.symbol,
                     'FOG16',
                     'Primary should remain as FOG16 on third session.')
    self.assertEqual(result.secondary.symbol,
                     'FOH16',
                     'Secondary should remain as FOH16 on third session.')
def test_history_daily(self):
    """
    Every rolling 3-bar daily history window recorded by the algorithm
    must equal the corresponding slice of the source DataFrame.
    """
    bar_count = 3
    algo_text = """
from zipline.api import history, add_history

def initialize(context):
    add_history(bar_count={bar_count}, frequency='1d', field='price')
    context.history_trace = []

def handle_data(context, data):
    prices = history(bar_count={bar_count}, frequency='1d', field='price')
    context.history_trace.append(prices)
""".format(bar_count=bar_count).strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-30', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end, data_frequency='daily', env=self.env,
    )

    _, df = factory.create_test_df_source(sim_params, self.env)
    df = df.astype(np.float64)
    source = DataFrameSource(df)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='daily',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )

    output = test_algo.run(source)
    self.assertIsNotNone(output)

    # Map sid columns to Asset objects so frames compare equal.
    df.columns = self.env.asset_finder.retrieve_all(df.columns)

    # Skip the warm-up bars: only windows with a full bar_count of data
    # are compared.
    for i, received in enumerate(test_algo.history_trace[bar_count - 1:]):
        expected = df.iloc[i:i + bar_count]
        assert_frame_equal(expected, received)
def test_history_container_constructed_at_runtime(self, data_freq):
    """
    Parametrized over data_freq: the HistoryContainer must be created
    lazily during run() and hold a single daily digest panel for the
    '1d' HistorySpec requested by handle_data.
    """
    algo_text = dedent(
        """\
        from zipline.api import history
        def handle_data(context, data):
            context.prices = history(2, '1d', 'price')
        """
    )
    start = pd.Timestamp("2007-04-05", tz="UTC")
    end = pd.Timestamp("2007-04-10", tz="UTC")

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency=data_freq,
        emission_rate=data_freq,
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency=data_freq,
        sim_params=sim_params,
        env=TestHistoryAlgo.env
    )

    source = RandomWalkSource(start=start, end=end, freq=data_freq)

    # No container exists until history() is first serviced.
    self.assertIsNone(test_algo.history_container)
    test_algo.run(source)
    self.assertIsNotNone(
        test_algo.history_container,
        msg="HistoryContainer was not constructed at runtime")

    container = test_algo.history_container
    self.assertEqual(
        len(container.digest_panels),
        1,
        msg="The HistoryContainer created too many digest panels")

    freq, digest = list(container.digest_panels.items())[0]
    self.assertEqual(freq.unit_str, "d")
    self.assertEqual(
        digest.window_length,
        1,
        msg="The digest panel is not large enough to service the given"
        " HistorySpec"
    )
def test_history_passed_to_func(self):
    """
    Had an issue where MagicMock was causing errors during validation
    with rolling mean.
    """
    algo_text = """
from zipline.api import history, add_history
import pandas as pd

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')

    pd.rolling_mean(prices, 2)
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30
    # NOTE: pd.rolling_mean in the embedded algo targets the old pandas
    # API this codebase was written against.
    start = pd.Timestamp('2007-04-10', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='minute'
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)

    # At this point, just ensure that there is no crash.
    self.assertIsNotNone(output)
def run_algo_single(**algo_descr):
    """
    Run one TradingAlgorithm described by *algo_descr* and score it.

    Expected keys: 'initialize', 'handle_data', 'param_set', 'data';
    optional: 'constraint_func' (skip the run when it rejects the
    param_set) and 'objective' ('none', 'sharpe', 'total_return', or a
    callable).

    Returns np.nan when the constraint rejects the parameters or an
    ImportError occurs; otherwise the value selected by 'objective'.
    """
    if 'constraint_func' in algo_descr:
        if algo_descr['constraint_func'](algo_descr['param_set']):
            return np.nan

    try:
        algo = TradingAlgorithm(initialize=algo_descr['initialize'],
                                handle_data=algo_descr['handle_data'],
                                **algo_descr['param_set'])
        perf = algo.run(algo_descr['data'])
        daily_rets = perf.portfolio_value.pct_change().dropna()
        if daily_rets.std() > 0:
            # Annualized Sharpe ratio (252 trading days).
            sharpe_ratio_calc = (daily_rets.mean() / daily_rets.std()
                                 * np.sqrt(252))
        else:
            # Sentinel for a flat (zero-variance) return series.
            sharpe_ratio_calc = -999
        risk_report = algo.risk_report
        risk_cum = pd.Series(
            algo.perf_tracker.cumulative_risk_metrics.to_dict())
    except ImportError as e:
        print(e)
        return np.nan

    # Apply objective functions
    objective = algo_descr.get('objective', 'none')
    if objective == 'none':
        obj = (perf, risk_cum, risk_report)
    elif objective == 'sharpe':
        obj = sharpe_ratio_calc
    elif objective == 'total_return':
        obj = perf['portfolio_value'][-1] / perf['portfolio_value'][0] - 1
    elif callable(objective):
        obj = objective(perf, risk_cum, risk_report)
    else:
        # FIX: previously raised NotImplemented, which is a constant
        # (not an exception class) and would itself blow up with a
        # TypeError; NotImplementedError is the correct exception.
        raise NotImplementedError('Objective %s not implemented.'
                                  % objective)

    # FIX: converted the Python 2 print statement to the print()
    # function; output text is unchanged.
    print("Sharpe: " + str(sharpe_ratio_calc) + " %_Return: " + str(
        perf.portfolio_value[-1] / perf.portfolio_value[0] - 1) +
        " MaxDD: " + str(perf.max_drawdown[-1]) + " MaxExp: " + str(
        perf.max_leverage[-1]))

    return obj
def test_history_with_volume(self): algo_text = """ from zipline.api import history, add_history, record def initialize(context): add_history(3, '1d', 'volume') def handle_data(context, data): volume = history(3, '1d', 'volume') record(current_volume=volume[0].ix[-1]) """.strip() # April 2007 # Su Mo Tu We Th Fr Sa # 1 2 3 4 5 6 7 # 8 9 10 11 12 13 14 # 15 16 17 18 19 20 21 # 22 23 24 25 26 27 28 # 29 30 start = pd.Timestamp('2007-04-10', tz='UTC') end = pd.Timestamp('2007-04-10', tz='UTC') sim_params = SimulationParameters( period_start=start, period_end=end, capital_base=float("1.0e5"), data_frequency='minute', emission_rate='minute' ) test_algo = TradingAlgorithm( script=algo_text, data_frequency='minute', sim_params=sim_params, env=TestHistoryAlgo.env, ) source = RandomWalkSource(start=start, end=end) output = test_algo.run(source) np.testing.assert_equal(output.ix[0, 'current_volume'], 212218404.0)
def test_history_with_open(self): algo_text = """ from zipline.api import history, add_history, record def initialize(context): add_history(3, '1d', 'open_price') def handle_data(context, data): opens = history(3, '1d', 'open_price') record(current_open=opens[0].ix[-1]) """.strip() # April 2007 # Su Mo Tu We Th Fr Sa # 1 2 3 4 5 6 7 # 8 9 10 11 12 13 14 # 15 16 17 18 19 20 21 # 22 23 24 25 26 27 28 # 29 30 start = pd.Timestamp('2007-04-10', tz='UTC') end = pd.Timestamp('2007-04-10', tz='UTC') sim_params = SimulationParameters( period_start=start, period_end=end, capital_base=float("1.0e5"), data_frequency='minute', emission_rate='minute' ) test_algo = TradingAlgorithm( script=algo_text, data_frequency='minute', sim_params=sim_params, env=TestHistoryAlgo.env, ) source = RandomWalkSource(start=start, end=end) output = test_algo.run(source) np.testing.assert_equal(output.ix[0, 'current_open'], 99.991436939669939)
def markowitz(stocks, cash):
    """
    Backtest the Markowitz strategy over the last 50 business days of
    Yahoo data for *stocks* with starting *cash*.

    Returns a tuple of (results, returns_plot, price_plot,
    frontier_plot, equalweight_returns, optimal_returns) where the
    plots are HTML/HTTP artifacts for the web front end.
    """
    warnings.filterwarnings("once")
    solvers.options['show_progress'] = False

    end = pd.Timestamp.utcnow()
    start = end - 50 * pd.tseries.offsets.BDay()

    data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)

    # Instantiate algorithm
    algo = TradingAlgorithm(initialize=initialize,
                            handle_data=handle_data, cash=cash)

    # Run algorithm
    results = algo.run(data)

    # portfolio value plot
    raw_plot = results.portfolio_value.plot()
    raw_fig = raw_plot.get_figure()
    returns_plot = mpld3.fig_to_html(raw_fig)
    raw_fig.clf()

    # stock price plot: normalize each series to start at 100
    raw_price_data = data.loc[:, :, 'price'].pct_change(1).fillna(0).applymap(
        lambda x: x + 1).cumprod().applymap(lambda x: x * 100)
    raw_price_plot = raw_price_data.plot(figsize=(8, 5))
    raw_price_fig = raw_price_plot.get_figure()
    price_plot = mpld3.fig_to_html(raw_price_fig)
    raw_price_fig.clf()

    # final returns
    # FIX: map() returns an iterator on Python 3, so subscripting it
    # directly fails; materialize with list() first (works on 2 and 3).
    equalweight_returns = (
        sum(list(map(list, raw_price_data.tail(1).values))[0]) / 4 - 100)
    equalweight_returns = '{0:.2f}%'.format(float(equalweight_returns))
    optimal_returns = (results.portfolio_value.tail(1).iloc[0] - 100000) / 1000
    optimal_returns = '{0:.2f}%'.format(float(optimal_returns))

    # efficient frontier plot
    # FIX: the file handle was opened and never closed; use a context
    # manager so it is released deterministically.
    with open("plot.png", "rb") as plot_file:
        frontier_plot_data = plot_file.read()

    # serialize to HTTP response
    frontier_plot = HttpResponse(frontier_plot_data,
                                 content_type="image/png")

    return (results, returns_plot, price_plot, frontier_plot,
            equalweight_returns, optimal_returns)
def test_basic_history(self):
    """
    A 2-bar daily price history should span yesterday's close through
    the current bar, and allow adding derived columns to the frame.
    """
    algo_text = """
from zipline.api import history, add_history

def initialize(context):
    add_history(bar_count=2, frequency='1d', field='price')

def handle_data(context, data):
    prices = history(bar_count=2, frequency='1d', field='price')
    prices['prices_times_two'] = prices[1] * 2
    context.last_prices = prices
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp("2006-03-20", tz="UTC")
    end = pd.Timestamp("2006-03-21", tz="UTC")

    sim_params = factory.create_simulation_parameters(start=start, end=end)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency="minute",
        sim_params=sim_params,
        env=TestHistoryAlgo.env
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    last_prices = test_algo.last_prices[0]
    oldest_dt = pd.Timestamp("2006-03-20 4:00 PM",
                             tz="US/Eastern").tz_convert("UTC")
    newest_dt = pd.Timestamp("2006-03-21 4:00 PM",
                             tz="US/Eastern").tz_convert("UTC")

    # FIX: assertEquals is a deprecated alias of assertEqual; use the
    # canonical name.
    self.assertEqual(oldest_dt, last_prices.index[0])
    self.assertEqual(newest_dt, last_prices.index[-1])

    # Random, depends on seed
    self.assertEqual(139.36946942498648, last_prices[oldest_dt])
    self.assertEqual(180.15661995395106, last_prices[newest_dt])
def markowitz(stocks, cash):
    """
    Backtest the Markowitz strategy over the last 50 business days of
    Yahoo data for *stocks* with starting *cash* and return the
    results plus rendered plot artifacts for the web front end.
    """
    warnings.filterwarnings("once")
    solvers.options['show_progress'] = False

    end = pd.Timestamp.utcnow()
    start = end - 50 * pd.tseries.offsets.BDay()

    data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)

    # Instantiate algorithm
    algo = TradingAlgorithm(initialize=initialize,
                            handle_data=handle_data, cash=cash)

    # Run algorithm
    results = algo.run(data)

    # portfolio value plot
    raw_plot = results.portfolio_value.plot()
    raw_fig = raw_plot.get_figure()
    returns_plot = mpld3.fig_to_html(raw_fig)
    raw_fig.clf()

    # stock price plot: normalize each series to start at 100
    raw_price_data = data.loc[:, :, 'price'].pct_change(1).fillna(0).applymap(
        lambda x: x + 1).cumprod().applymap(lambda x: x * 100)
    raw_price_plot = raw_price_data.plot(figsize=(8, 5))
    raw_price_fig = raw_price_plot.get_figure()
    price_plot = mpld3.fig_to_html(raw_price_fig)
    raw_price_fig.clf()

    # final returns
    # FIX: map() returns an iterator on Python 3, so subscripting it
    # directly fails; materialize with list() first (works on 2 and 3).
    equalweight_returns = (
        sum(list(map(list, raw_price_data.tail(1).values))[0]) / 4 - 100)
    equalweight_returns = '{0:.2f}%'.format(float(equalweight_returns))
    optimal_returns = (results.portfolio_value.tail(1).iloc[0] - 100000) / 1000
    optimal_returns = '{0:.2f}%'.format(float(optimal_returns))

    # efficient frontier plot
    # FIX: the file handle was opened and never closed; use a context
    # manager so it is released deterministically.
    with open("plot.png", "rb") as plot_file:
        frontier_plot_data = plot_file.read()

    # serialize to HTTP response
    frontier_plot = HttpResponse(frontier_plot_data,
                                 content_type="image/png")

    return (results, returns_plot, price_plot, frontier_plot,
            equalweight_returns, optimal_returns)
def run_algo_single(**algo_descr):
    """
    Run one TradingAlgorithm described by *algo_descr* and score it.

    Expected keys: 'initialize', 'handle_data', 'param_set', 'data';
    optional: 'constraint_func' (skip the run when it rejects the
    param_set) and 'objective' ('none', 'sharpe', 'total_return', or a
    callable).

    Returns np.nan when the constraint rejects the parameters or an
    ImportError occurs; otherwise the value selected by 'objective'.
    """
    if 'constraint_func' in algo_descr:
        if algo_descr['constraint_func'](algo_descr['param_set']):
            return np.nan

    try:
        algo = TradingAlgorithm(initialize=algo_descr['initialize'],
                                handle_data=algo_descr['handle_data'],
                                **algo_descr['param_set']
                                )
        perf = algo.run(algo_descr['data'])
        daily_rets = perf.portfolio_value.pct_change().dropna()
        if daily_rets.std() > 0:
            # Annualized Sharpe ratio (252 trading days).
            sharpe_ratio_calc = (daily_rets.mean() / daily_rets.std()
                                 * np.sqrt(252))
        else:
            # Sentinel for a flat (zero-variance) return series.
            sharpe_ratio_calc = -999
        risk_report = algo.risk_report
        risk_cum = pd.Series(
            algo.perf_tracker.cumulative_risk_metrics.to_dict())
    except ImportError as e:
        print(e)
        return np.nan

    # Apply objective functions
    objective = algo_descr.get('objective', 'none')
    if objective == 'none':
        obj = (perf, risk_cum, risk_report)
    elif objective == 'sharpe':
        obj = sharpe_ratio_calc
    elif objective == 'total_return':
        obj = perf['portfolio_value'][-1] / perf['portfolio_value'][0] - 1
    elif callable(objective):
        obj = objective(perf, risk_cum, risk_report)
    else:
        # FIX: previously raised NotImplemented, which is a constant
        # (not an exception class) and would itself blow up with a
        # TypeError; NotImplementedError is the correct exception.
        raise NotImplementedError('Objective %s not implemented.'
                                  % objective)

    # FIX: converted the Python 2 print statement to the print()
    # function; output text is unchanged.
    print("Sharpe: " + str(sharpe_ratio_calc) + " %_Return: " +
          str(perf.portfolio_value[-1] / perf.portfolio_value[0] - 1) +
          " MaxDD: " + str(perf.max_drawdown[-1]) + " MaxExp: " +
          str(perf.max_leverage[-1]))

    return obj
def test_history_with_high(self): algo_text = """ from zipline.api import history, add_history, record def initialize(context): add_history(3, '1d', 'high') def handle_data(context, data): highs = history(3, '1d', 'high') record(current_high=highs[0].ix[-1]) """.strip() # April 2007 # Su Mo Tu We Th Fr Sa # 1 2 3 4 5 6 7 # 8 9 10 11 12 13 14 # 15 16 17 18 19 20 21 # 22 23 24 25 26 27 28 # 29 30 start = pd.Timestamp("2007-04-10", tz="UTC") end = pd.Timestamp("2007-04-10", tz="UTC") sim_params = SimulationParameters( period_start=start, period_end=end, capital_base=float("1.0e5"), data_frequency="minute", emission_rate="minute", ) test_algo = TradingAlgorithm( script=algo_text, data_frequency="minute", sim_params=sim_params, env=TestHistoryAlgo.env ) source = RandomWalkSource(start=start, end=end) output = test_algo.run(source) np.testing.assert_equal(output.ix[0, "current_high"], 139.5370641791925)
def startbacktest():
    """
    Web endpoint: run a backtest of the submitted strategy code over
    [starttime, endtime] while capturing its stdout/stderr, then
    return a JSON payload.
    """
    starttime = request.args.get('starttime')
    endtime = request.args.get('endtime')
    code = request.args.get('code')

    # Process the submitted code: redirect the standard streams so the
    # backtest's output can be captured.
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = mystdout = StringIO()
    sys.stderr = mystderr = StringIO()
    try:
        algo = TradingAlgorithm(script=code,
                                startdate=starttime,
                                enddate=endtime,
                                capital_base=10000,
                                benchmark='sz399004')
        results = algo.run(input_data)
        # FIX: Python 2 print statements converted to the print()
        # function throughout; this one lands in the captured buffer.
        print(results)
    except Exception as error:
        print('caught this error: ' + repr(error))
    finally:
        # FIX (robustness): always restore the real streams, even if
        # something other than Exception escapes the handler above.
        sys.stdout = old_stdout
        sys.stderr = old_stderr

    # Echo the captured output to the real stdout for server logs.
    print(mystdout.getvalue())
    print(mystderr.getvalue())

    # NOTE(review): json_results is always empty here — presumably a
    # stub; confirm whether results were meant to be serialized.
    json_results = []
    return toJson(json_results)
def test_current_chain_in_algo(self):
    """
    data.current_chain() for primary/secondary continuous futures
    should shrink and roll across sessions: chain lengths drop as
    contracts roll, and the front contract advances while the back of
    the chain stays fixed (fixture data is not exhaustive enough to
    move it).
    """
    code = dedent("""
from zipline.api import (
    record,
    continuous_future,
    schedule_function,
    get_datetime,
)

def initialize(algo):
    algo.primary_cl = continuous_future('FO', 0, 'calendar')
    algo.secondary_cl = continuous_future('FO', 1, 'calendar')
    schedule_function(record_current_contract)

def record_current_contract(algo, data):
    record(datetime=get_datetime())
    primary_chain = data.current_chain(algo.primary_cl)
    secondary_chain = data.current_chain(algo.secondary_cl)
    record(primary_len=len(primary_chain))
    record(primary_first=primary_chain[0].symbol)
    record(primary_last=primary_chain[-1].symbol)
    record(secondary_len=len(secondary_chain))
    record(secondary_first=secondary_chain[0].symbol)
    record(secondary_last=secondary_chain[-1].symbol)
""")
    algo = TradingAlgorithm(script=code,
                            sim_params=self.sim_params,
                            trading_calendar=self.trading_calendar,
                            env=self.env)
    results = algo.run(self.data_portal)
    result = results.iloc[0]
    self.assertEqual(result.primary_len,
                     4,
                     'There should be only 4 contracts in the chain for '
                     'the primary, there are 5 contracts defined in the '
                     'fixture, but one has a start after the simulation '
                     'date.')
    self.assertEqual(result.secondary_len,
                     3,
                     'There should be only 3 contracts in the chain for '
                     'the primary, there are 5 contracts defined in the '
                     'fixture, but one has a start after the simulation '
                     'date. And the first is not included because it is '
                     'the primary on that date.')
    self.assertEqual(result.primary_first,
                     'FOF16',
                     'Front of primary chain should be FOF16 on first '
                     'session.')
    self.assertEqual(result.secondary_first,
                     'FOG16',
                     'Front of secondary chain should be FOG16 on first '
                     'session.')
    self.assertEqual(result.primary_last,
                     'FOJ16',
                     'End of primary chain should be FOJ16 on first '
                     'session.')
    self.assertEqual(result.secondary_last,
                     'FOJ16',
                     'End of secondary chain should be FOJ16 on first '
                     'session.')

    # Second day, primary should switch to FOG
    result = results.iloc[1]
    self.assertEqual(result.primary_len,
                     3,
                     'There should be only 3 contracts in the chain for '
                     'the primary, there are 5 contracts defined in the '
                     'fixture, but one has a start after the simulation '
                     'date. The first is not included because of roll.')
    self.assertEqual(result.secondary_len,
                     2,
                     'There should be only 2 contracts in the chain for '
                     'the primary, there are 5 contracts defined in the '
                     'fixture, but one has a start after the simulation '
                     'date. The first is not included because of roll, '
                     'the second is the primary on that date.')
    self.assertEqual(result.primary_first,
                     'FOG16',
                     'Front of primary chain should be FOG16 on second '
                     'session.')
    self.assertEqual(result.secondary_first,
                     'FOH16',
                     'Front of secondary chain should be FOH16 on second '
                     'session.')

    # These values remain FOJ16 because fixture data is not exhaustive
    # enough to move the end of the chain.
    self.assertEqual(result.primary_last,
                     'FOJ16',
                     'End of primary chain should be FOJ16 on second '
                     'session.')
    self.assertEqual(result.secondary_last,
                     'FOJ16',
                     'End of secondary chain should be FOJ16 on second '
                     'session.')
security.symbol: data[:data.index[0] + datetime.timedelta(days=10)] }) count += 1 print str(count) + " : " + security.symbol except: continue def initialize(context): context.stock = symbol(security.symbol) context.traded = False def handle_data(context, data): if context.traded: return print data order(context.stock, 10000) order(context.stock, -10000, style=LimitOrder(data[context.stock]['close'] * 1.1)) context.traded = True # NOTE: This cell will take a few minutes to run. # Create algorithm object passing in initialize and # handle_data functions algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data) # Run algorithm perf_manual[security.symbol] = algo_obj.run(data)
from zipline.api import order, sid
from zipline.data.loader import load_bars_from_yahoo

# creating time interval
start = pd.Timestamp('2008-01-01', tz='UTC')
end = pd.Timestamp('2013-01-01', tz='UTC')

# loading the data
input_data = load_bars_from_yahoo(
    stocks=['AAPL', 'MSFT'],
    start=start,
    end=end,
)

# checking if I can merge this


def initialize(context):
    # Track whether the one-time purchase has been made.
    context.has_ordered = False


def handle_data(context, data):
    # Place a single 100-share order in every available stock, once
    # for the whole run.
    if not context.has_ordered:
        for stock in data:
            order(sid(stock), 100)
        context.has_ordered = True


algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(input_data)
from datetime import datetime

import pytz

from zipline import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
from zipline.api import order


def initialize(context):
    # Arbitrary state stored on the context; printed each bar below.
    context.test = 10


# NOTE(review): named `handle_date` (likely a typo for handle_data) but
# wired up correctly via the handle_data= keyword below; left unchanged
# since the name is part of this module's public surface.
def handle_date(context, data):
    # Buy 10 shares of AAPL on every bar and print the stored state.
    order('AAPL', 10)
    print(context.test)


if __name__ == '__main__':
    import pylab as pl
    start = datetime(2008, 1, 1, 0, 0, 0, 0, pytz.utc)
    end = datetime(2010, 1, 1, 0, 0, 0, 0, pytz.utc)
    data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
                           end=end)
    data = data.dropna()
    algo = TradingAlgorithm(initialize=initialize,
                            handle_data=handle_date)
    results = algo.run(data)
    results.portfolio_value.plot()
    pl.show()
def test_history_in_bts_volume_days(self, data_freq):
    """
    Test calling history() in before_trading_start()
    with daily volume bars.
    """
    # The algo records a 2-bar daily volume window both in handle_data()
    # and in before_trading_start() (the first BTS call is skipped since
    # there is no prior session to look back on yet).
    algo_text = """
from zipline.api import history

def initialize(context):
    context.first_bts_call = True

def before_trading_start(context, data):
    if not context.first_bts_call:
        volume_bts = history(bar_count=2, frequency='1d', field='volume')
        context.volume_bts = volume_bts
    context.first_bts_call = False

def handle_data(context, data):
    volume_hd = history(bar_count=2, frequency='1d', field='volume')
    context.volume_hd = volume_hd
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #          1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-22', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end, data_frequency=data_freq)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency=data_freq,
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )
    source = RandomWalkSource(start=start, end=end, freq=data_freq)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    # Get the volume recorded by history() within handle_data()
    volume_hd_0 = test_algo.volume_hd[0]
    volume_hd_1 = test_algo.volume_hd[1]

    # Get the volume recorded by history() within BTS
    volume_bts_0 = test_algo.volume_bts[0]
    volume_bts_1 = test_algo.volume_bts[1]

    # Last trade of the penultimate session (~4pm Eastern).
    penultimate_hd_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    # Midnight of the day on which BTS is invoked.
    newest_bts_dt = normalize_date(pd.Timestamp(
        '2006-03-22 04:00 PM', tz='US/Eastern').tz_convert('UTC'))

    if data_freq == 'daily':
        # If we're dealing with daily data, then we record
        # canonicalized timestamps, so make conversion here:
        penultimate_hd_dt = normalize_date(penultimate_hd_dt)

    # When history() is called in BTS, its 'current' volume value
    # should equal the sum of the previous day.
    self.assertEquals(volume_hd_0[penultimate_hd_dt],
                      volume_bts_0[newest_bts_dt])
    self.assertEquals(volume_hd_1[penultimate_hd_dt],
                      volume_bts_1[newest_bts_dt])
def test_history_in_bts_volume_minutes(self):
    """
    Test calling history() in before_trading_start()
    with minutely volume bars.
    """
    # The algo only records within BTS (skipping the very first BTS call);
    # handle_data is a deliberate no-op.
    algo_text = """
from zipline.api import history

def initialize(context):
    context.first_bts_call = True

def before_trading_start(context, data):
    if not context.first_bts_call:
        volume_bts = history(bar_count=2, frequency='1m', field='volume')
        context.volume_bts = volume_bts
    context.first_bts_call = False

def handle_data(context, data):
    pass
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #          1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-22', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )
    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    # Get the volumes recorded for sid 0 by history() within BTS
    volume_bts_0 = test_algo.volume_bts[0]
    # Get the volumes recorded for sid 1 by history() within BTS
    volume_bts_1 = test_algo.volume_bts[1]

    # The values recorded on 2006-03-22 by history() in BTS
    # should equal the final volume values for the trading
    # day 2006-03-21:
    #                             0       1
    # 2006-03-21 20:59:00    215548  439908
    # 2006-03-21 21:00:00    985645  664313
    #
    # Note: These are not 'real' volume values. They are the product of
    # RandonWalkSource, which produces random walk OHLCV timeseries. For a
    # given seed these values are deterministc.
    self.assertEquals(215548, volume_bts_0.ix[0])
    self.assertEquals(985645, volume_bts_0.ix[1])
    self.assertEquals(439908, volume_bts_1.ix[0])
    self.assertEquals(664313, volume_bts_1.ix[1])
def run_backtest(params, instruments = ['close'], filename = None, verbose = False, asset_type = 'Top'): # instruments says to wichi value(s) we look at (open, close, etc) # Set global variables global PARAMS try: thismodule = sys.modules[__name__] general_parameters = params['general_parameters'] for key, value in general_parameters.iteritems(): setattr(thismodule, key, value) except: print "Warning: General parameters loading was not completed successfully! However, going to proceed with the default parameters..." PARAMS = params stocks = np.unique(np.loadtxt(ASSET_READER, dtype=str, delimiter='/n')) stocks = [str(stocks[i]) for i in xrange(len(stocks))] # convert elements in stocks from numpy._string to python.string stocks.sort() returns_method = RETS_READER if RETS_READER != 'None' else 'NONE' volatility_method = VOL_READER if VOL_READER != 'None' else 'NONE' correlation_method = CORR_READER if CORR_READER != 'None' else 'NONE' start_t = time.time() if verbose: print "Going to read data... 
" last_date_db = START_DATE_BT_STR db = utils.make_db_connection() cursor = db.cursor() query_1 = """select DT_DATE from PASS_SYS.T_PASS_BACKTEST where ST_OPTIMIZATIONMETHOD = '%s' and ST_RETURNPREDICTIONMETHOD = '%s' and ST_VOLATILITYPREDICTIONMETHOD = '%s' and ST_CORRELATIONPREDICTIONMETHOD = '%s' and ST_UNIVERSE = '%s' order by DT_DATE """ % (ID, returns_method, volatility_method, correlation_method, 'Top')#TODO should be ASSET_READER instead of hardcode 'Top' select_1= pd.read_sql_query(query_1, db, index_col='DT_DATE') if len(select_1) > 0: last_date_db = select_1.index[-1] db.close() #TODO END_DATE_BT_STR should be recalculated to the last rebalance date so we don't have the last month with 0 allocations data = utils.load_data_from_passdb(stocks, instruments = instruments, set_price = REBALANCE_ACTION_TYPE, start = last_date_db, end = END_DATE_BT_STR, align_dates = False, transform_to_weights = False, add_dummy_volume = True, output_asset_info = False, convert_curr = REF_CURNCY) df_info = pd.read_csv('./Info/assets_info.csv') data_price_df = utils.truncate_data(data.minor_xs('price'), df_info) symbols = data_price_df.columns.tolist() data = data[symbols] for symbol_ in symbols: data[symbol_]['price'] = data_price_df[symbol_] if verbose: print "Time to read data: ", time.time() - start_t date_limits = utils.get_date_limits(data.minor_xs('price')) print 'get_date_limits' print date_limits print '' print '' print agdjagj algo = TradingAlgorithm(initialize=initialize, handle_data = handle_data, date_limits = date_limits, verbose = verbose, symbols = symbols) if verbose: print "Going to run..." results = algo.run(data) results.index = results.index.date #TODO should be ASSET_READER instead of hardcode 'Top' store_backtest_results_db(ID, returns_method, volatility_method, correlation_method, 'Top', results) #TODO don't send to csv unless there's some option to do it if filename is not None: results.to_csv(filename) return results
def test_history_in_bts_price_minutes(self):
    """
    Test calling history() in before_trading_start()
    with minutely price bars.
    """
    # The algo records a single price bar within BTS only (skipping the
    # first BTS call); handle_data is a deliberate no-op.
    algo_text = """
from zipline.api import history

def initialize(context):
    context.first_bts_call = True

def before_trading_start(context, data):
    if not context.first_bts_call:
        price_bts = history(bar_count=1, frequency='1m', field='price')
        context.price_bts = price_bts
    context.first_bts_call = False

def handle_data(context, data):
    pass
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #          1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-22', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )
    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    # Get the prices recorded by history() within BTS
    price_bts_0 = test_algo.price_bts[0]
    price_bts_1 = test_algo.price_bts[1]

    # The prices recorded by history() in BTS should
    # be the closing price of the previous day, which are:
    #
    #          sid | close on 2006-03-21
    #         ----------------------------
    #            0 | 180.15661995395106
    #            1 | 578.41665003444723

    # These are not 'real' price values. They are the product of
    # RandonWalkSource, which produces random walk OHLCV timeseries. For a
    # given seed these values are deterministc.
    self.assertEquals(180.15661995395106, price_bts_0.ix[0])
    self.assertEquals(578.41665003444723, price_bts_1.ix[0])
def test_history_in_bts_price_days(self, data_freq):
    """
    Test calling history() in before_trading_start()
    with daily price bars.
    """
    # The algo records a 3-bar daily price window both in handle_data()
    # and in before_trading_start() (skipping the very first BTS call).
    algo_text = """
from zipline.api import history

def initialize(context):
    context.first_bts_call = True

def before_trading_start(context, data):
    if not context.first_bts_call:
        prices_bts = history(bar_count=3, frequency='1d', field='price')
        context.prices_bts = prices_bts
    context.first_bts_call = False

def handle_data(context, data):
    prices_hd = history(bar_count=3, frequency='1d', field='price')
    context.prices_hd = prices_hd
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #          1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-22', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end, data_frequency=data_freq)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency=data_freq,
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )
    source = RandomWalkSource(start=start, end=end, freq=data_freq)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    # Get the prices recorded by history() within handle_data()
    prices_hd = test_algo.prices_hd[0]
    # Get the prices recorded by history() within BTS
    prices_bts = test_algo.prices_bts[0]

    # before_trading_start() is timestamp'd to midnight prior to
    # the day's trading. Since no equity trades occur at midnight,
    # the price recorded for this time is forward filled from the
    # last trade - typically ~4pm the previous day. This results
    # in the OHLCV data recorded by history() in BTS lagging
    # that recorded by history in handle_data().
    #
    # The trace of the pricing data from history() called within
    # handle_data() vs. BTS in the above algo is as follows:

    #  When called within handle_data()
    # ---------------------------------
    # 2006-03-20 21:00:00    139.369469
    # 2006-03-21 21:00:00    180.156620
    # 2006-03-22 21:00:00    221.344654

    #       When called within BTS
    # ---------------------------------
    # 2006-03-17 21:00:00           NaN
    # 2006-03-20 21:00:00    139.369469
    # 2006-03-22 00:00:00    180.156620

    # Get relevant Timestamps for the history() call within handle_data()
    oldest_hd_dt = pd.Timestamp(
        '2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    penultimate_hd_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')

    # Get relevant Timestamps for the history() call within BTS
    penultimate_bts_dt = pd.Timestamp(
        '2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    newest_bts_dt = normalize_date(pd.Timestamp(
        '2006-03-22 04:00 PM', tz='US/Eastern').tz_convert('UTC'))

    if data_freq == 'daily':
        # If we're dealing with daily data, then we record
        # canonicalized timestamps, so make conversion here:
        oldest_hd_dt = normalize_date(oldest_hd_dt)
        penultimate_hd_dt = normalize_date(penultimate_hd_dt)
        penultimate_bts_dt = normalize_date(penultimate_bts_dt)

    # The BTS series lags handle_data's by one session, so equal values
    # line up at offset timestamps.
    self.assertEquals(prices_hd[oldest_hd_dt],
                      prices_bts[penultimate_bts_dt])
    self.assertEquals(prices_hd[penultimate_hd_dt],
                      prices_bts[newest_bts_dt])
if False:
    # Manual-testing shortcut: build the strategy and bail out early.
    log.info("do some testing ONLY")
    createStrategy(createStatements())
    quit()

log.info("it's __main__, do something")

log.info('load data')
data = OrderedDict()
# df = pd.read_csv('data/603997.csv', index_col='date', parse_dates=['date']).tail(2770)
df = pd.read_csv('data/603997.csv', index_col='date',
                 parse_dates=['date']).tail(100)
# Extra monotonically-increasing column, handy for eyeballing alignment.
df['test'] = list(range(len(df.index)))
log.info(df.columns)
log.info(df.head(5))
data['603997'] = df
panel = pd.Panel(data)

algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
log.info('run algorithm')
perf_manual = algo_obj.run(panel)
print(perf_manual)

if True:
    import pickle

    # Renamed from `file` (which shadowed the builtin) and switched to a
    # context manager so the handle is closed even if pickling fails —
    # the original `pickle.dump(perf_manual, open(file, 'wb'))` leaked
    # the file object.
    perf_path = "e:\\perf.p"
    log.info("write performance to pickle : %s" % perf_path)
    with open(perf_path, 'wb') as pickle_file:
        pickle.dump(perf_manual, pickle_file)
    log.info('done')
# NOTE(review): the `try:` and per-security loop enclosing this excerpt
# begin above it — the `except:` below pairs with that unseen `try`.
# Keep only the first ~10 calendar days of bars for this security,
# wrapped in a Panel keyed by its ticker symbol.
data = pd.Panel({security.symbol:
                 data[:data.index[0] + datetime.timedelta(days=10)]}
                )
count +=1
print str(count) + " : " + security.symbol
# NOTE(review): bare `except: continue` silently skips failing securities —
# consider narrowing the exception and logging.
except:
    continue


def initialize(context):
    # Single security captured from the enclosing loop; nothing traded yet.
    context.stock = symbol(security.symbol)
    context.traded = False


def handle_data(context, data):
    # Trade exactly once: market-buy 10000 shares, then place a limit
    # sell of the same size 10% above the current close.
    if context.traded:
        return
    print data
    order(context.stock, 10000)
    order(context.stock, - 10000,
          style=LimitOrder(data[context.stock]['close'] * 1.1))
    context.traded = True


# NOTE: This cell will take a few minutes to run.

# Create algorithm object passing in initialize and
# handle_data functions
algo_obj = TradingAlgorithm(
    initialize=initialize,
    handle_data=handle_data
)

# Run algorithm
perf_manual[security.symbol] = algo_obj.run(data)
def run_trading(self):
    """Build a TradingAlgorithm from this object's hooks and run it.

    Returns the performance frame produced by running self.data.
    """
    engine = TradingAlgorithm(initialize=self.initialize,
                              handle_data=self.handle_data)
    return engine.run(self.data)
        context: The same context object from the initialize function.
                 Stores the up to date portfolio as well as any state
                 variables defined.

    Returns None
    '''
    # NOTE(review): this is the tail of a handle_data(context, data)
    # definition whose `def` line is above this excerpt.
    # Allow history to accumulate 100 days of prices before trading
    # and rebalance every day thereafter.
    context.tick += 1
    if context.tick < 100:
        return
    # Get rolling window of past prices and compute returns
    prices = history(100, '1d', 'price').dropna()
    returns = prices.pct_change().dropna()
    try:
        # Perform Markowitz-style portfolio optimization
        weights, _, _ = optimal_portfolio(returns.T)
        # Rebalance portfolio accordingly
        for stock, weight in zip(prices.columns, weights):
            order_target_percent(stock, weight)
    except ValueError as e:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        # Deliberate best-effort: skip rebalancing when the optimizer
        # rejects the covariance structure for this window.
        pass

# Instantinate algorithm
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
# Run algorithm
results = algo.run(total_data.T)
results.portfolio_value.plot()
if __name__ == '__main__':
    import sys
    import pytz
    import matplotlib.pyplot as plt
    from zipline import TradingAlgorithm
    from zipline.utils.factory import load_from_yahoo
    import argparse

    # Command-line interface for the similarity-prediction backtest.
    parser = argparse.ArgumentParser(
        description='predict/test using similarity-prediction')
    parser.add_argument('-t', '--ticker', action='store', default='AAPL',
                        help='tickers to predict/test')
    parser.add_argument('-m', '--mamethod', action='store',
                        choices=['ema', 'ma'], default='ema',
                        help='ma method to pre-process the Close/Volume')
    parser.add_argument('-p', '--maperiod', action='store', type=int,
                        default=20,
                        help='period to ma Close/Volume')
    parser.add_argument('-w', '--window', action='store', type=int,
                        default=20,
                        help='window size to match')
    parser.add_argument('-a', '--lookahead', action='store', type=int,
                        default=1,
                        help='days to lookahead when predict')
    # Fixed copy-pasted help text: this flag is the correlation threshold,
    # not a lookahead.
    parser.add_argument('-c', '--mincorr', action='store', type=float,
                        default=0.9,
                        help='minimum correlation for a window to match')
    parser.add_argument('-b', '--begin', action='store', type=str,
                        default='20100101',
                        help='start of the market data')
    parser.add_argument('-e', '--end', action='store', type=str,
                        default='20161221',
                        help='end of the market data')
    args = parser.parse_args()

    # Comma-separated ticker list, blanks stripped and empties dropped.
    tickers = [t.strip() for t in args.ticker.split(',') if t.strip()]
    data = prepare_data(tickers, start=args.begin, end=args.end)

    algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
                            capital_base=50000,
                            xdata=data, xticker=tickers,
                            xstart=args.begin, xend=args.end,
                            window=args.window)
    res = algo.run(data).dropna()
    analyze(res, tickers[0])
# Plot the portfolio and asset data. ax1 = plt.subplot(211) #211表示2行1列两个子图,现在正在画第一个portfolio_value图,ax是一个Axes对象 results['portfolio_value'].plot( ax=ax1, color='b', grid=True, linewidth=1.6 ) # results的index是日期,根据输出的csv结果,有列名SYMBOL600111,algo_volatility,algorithm_period_return,alpha,benchmark_period_return,benchmark_volatility,beta,capital_used,ending_cash,ending_exposure,ending_value,excess_return,gross_leverage,information,long_exposure,long_value,longs_count,max_drawdown,max_leverage,net_leverage,orders,pnl,portfolio_value,positions,returns,sharpe,short_exposure,short_value,shorts_count,sortino,starting_cash,starting_exposure,starting_value,trading_days,transactions,treasury_period_return ax1.set_ylabel('Portfolio Performance') #把y轴命名为Portfolio Performance ax2 = plt.subplot( 212, sharex=ax1) #取得另外一个Axes对象,212表示2行1列两个子图,现在正在画第二个get(SYMBOL)图 results.get(SYMBOL).plot(ax=ax2, grid=True, linewidth=1.6) #dataframe.get方法,SYMBOL可以改为 ax2.set_ylabel('%s price ' % SYMBOL) #把第二个子图的名字命名为price,后面那个取SYMBOL的名字 results.to_csv('aaaaaaaaaaaaaa.csv') # Show the plot. plt.gcf().set_size_inches( 18, 8) # plt.gcf():Get a reference to the current figure.设置尺寸 plt.show() bars = get_data_from_tushare( # Using tushare to get data which could be handled by zipline later. stocks=[SYMBOL], start="2015-10-09", end="2016-02-07", ) #bars是一个DataFrame类型,索引就是日期,还有一列是'600111'的收盘价,从tushare调用数据 algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data, identifiers=[SYMBOL]) perf = algo.run(bars) #performance是一个DataFrame类型 plot_function(results=perf)
# Watch list shared by the hooks below.
stocks = ['AAPL', 'MSFT']


def initialize(context):
    """Record the watch list and mark that nothing has been bought yet."""
    context.has_ordered = False
    context.stocks = stocks


def handle_data(context, data):
    """On the first bar, buy 100 shares of each watched stock; then idle."""
    if context.has_ordered:
        return
    for ticker in context.stocks:
        order(symbol(ticker), 100)
    context.has_ordered = True


if __name__ == '__main__':
    # Five-year daily window of Yahoo bars for the watch list.
    window_start = pd.Timestamp('2008-01-01', tz='UTC')
    window_end = pd.Timestamp('2013-01-01', tz='UTC')
    input_data = load_bars_from_yahoo(
        stocks=stocks,
        start=window_start,
        end=window_end,
    )

    algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
    results = algo.run(input_data)
def run_trading(self):
    """Run self.data through a freshly constructed TradingAlgorithm."""
    return TradingAlgorithm(
        initialize=self.initialize,
        handle_data=self.handle_data,
    ).run(self.data)
# NOTE(review): this try-block appears to be the body of a rebalance
# handler whose `def` line is above this excerpt.
try:
    # Get the strongest 5 in momentum
    mom = returns_6m.T.sum(axis=1)
    # Keep names with positive momentum, sorted ascending, top half.
    # NOTE(review): len(mom) / 2 relies on Python 2 integer division —
    # confirm before porting to Python 3.
    selected_indices = mom[mom>0].order().tail(len(mom) /2).index
    # selected_indices = mom.index
    # selected_indices = mom[mom > 0 ].index
    selected_returns = returns_60d[selected_indices]

    # Minimum-volatility weights over the selected names' 60-day returns.
    weights = minimize_vol(selected_returns.T)
    # weights = minimize_vol(returns_60d.T)
    # Rebalance portfolio accordingly
    for stock, weight in zip(selected_returns.columns, weights):
        order_target_percent(stock, weight)
except :
    # Sometimes this error is thrown
    # ValueError: Rank(A) < p or Rank([P; A; G]) < n
    # Deliberate best-effort: skip rebalancing when the optimizer fails.
    # NOTE(review): bare except also hides unrelated bugs — consider
    # narrowing to ValueError.
    pass

# Instantinate algorithm
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
# Run algorithm
results = algo.run(dp.dropna())

# Collect and plot the equity curve (column key 5 — unclear why; confirm).
ret_ports = pd.DataFrame()
ret_ports[5] = results.portfolio_value
ret_ports.plot(figsize=[20,10])
print results
# NOTE(review): the first half of this excerpt is the tail of an
# analyze(...) callback whose `def` line (and `fig`/first subplot) are
# above this excerpt.
    ax2 = fig.add_subplot(312)
    # Portfolio value normalized by the starting capital of 100,000.
    portfolio_ratio = perf.portfolio_value/100000.0
    portfolio_ratio.plot(ax=ax2, lw=2.)
    # Mark buy bars with magenta triangles-up, sell bars with black
    # triangles-down.
    ax2.plot(buys.index, portfolio_ratio.ix[buys.index],
             '^', markersize=10, color='m')
    ax2.plot(sells.index, portfolio_ratio.ix[sells.index],
             'v', markersize=10, color='k')

    # ax3 = fig.add_subplot(313)
    # perf.portfolio_value.plot(ax=ax3, lw=2.)
    # ax3.plot(buys.index, perf.portfolio_value.ix[buys.index], '^', markersize=10, color='m')
    # ax3.plot(sells.index, perf.portfolio_value.ix[sells.index], 'v', markersize=10, color='k')
    pass

algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
algo._analyze = analyze
perf = algo.run(df)

# Keep only bars that actually transacted, then split into buys/sells by
# the sign of the first transaction's amount.
perf_trans = perf.ix[[t!=[] for t in perf.transactions]]
buys = perf_trans.ix[[t[0]['amount'] > 0 for t in perf_trans.transactions]]
sells = perf_trans.ix[[t[0]['amount'] < 0 for t in perf_trans.transactions]]

investDays = validInvestDays(buys, sells, perf)
print investDays
cashes = perf.portfolio_value.ix[sells.index]
returnRatArr = returnRatioArr(cashes.values)
final_return_ratio = returnRatio(perf.portfolio_value[-1])
# The two prints below emit total and annualized return (labels are
# user-facing Chinese strings and are left untouched).
print '总收益率:', final_return_ratio
print '年化收益率:', annualizedReturnRatio([final_return_ratio], T=investDays, D=250.0)

from zipline.api import order_target, record, symbol, history, add_history
def main():
    """Run the algorithm over the module-level DATA and plot the
    MA1/MA2/Price columns of the resulting performance frame."""
    print("Aqui porra")
    runner = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
    frame = runner.run(DATA)
    frame[["MA1", "MA2", "Price"]].plot()
def start_algo3(data):
    """Run `data` through a new TradingAlgorithm.

    Returns a tuple of (performance frame, cumulative risk metrics).
    """
    engine = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
    perf = engine.run(data)
    return perf, engine.perf_tracker.cumulative_risk_metrics