def test_bts_simulation_dt(self):
    """Verify that a clock emitting only a BEFORE_TRADING_START event
    still advances the simulator's ``simulation_dt`` to that event's dt.
    """
    # Minimal algo script: no handle_data; we only care about the clock.
    code = """
def initialize(context):
    pass
"""
    algo = TradingAlgorithm(script=code, sim_params=self.sim_params,
                            env=self.env)

    # Attach a tracker manually since we bypass TradingAlgorithm.run().
    algo.perf_tracker = PerformanceTracker(
        sim_params=self.sim_params,
        trading_calendar=self.trading_calendar,
        asset_finder=self.asset_finder,
    )

    dt = pd.Timestamp("2016-08-04 9:13:14", tz='US/Eastern')
    algo_simulator = AlgorithmSimulator(
        algo,
        self.sim_params,
        self.data_portal,
        BeforeTradingStartsOnlyClock(dt),
        algo._create_benchmark_source(),
        NoRestrictions(),
        None
    )

    # run through the algo's simulation
    list(algo_simulator.transform())

    # since the clock only ever emitted a single before_trading_start
    # event, we can check that the simulation_dt was properly set
    self.assertEqual(dt, algo_simulator.simulation_dt)
def test_history_in_initialize(self):
    """Calling history() inside initialize() must raise
    HistoryInInitialize."""
    script = dedent("""\
        from zipline.api import history

        def initialize(context):
            history(10, '1d', 'price')

        def handle_data(context, data):
            pass
        """)

    session_start = pd.Timestamp('2007-04-05', tz='UTC')
    session_end = pd.Timestamp('2007-04-10', tz='UTC')

    params = SimulationParameters(
        period_start=session_start,
        period_end=session_end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='daily',
        env=self.env,
    )

    algo = TradingAlgorithm(
        script=script,
        data_frequency='minute',
        sim_params=params,
        env=self.env,
    )

    with self.assertRaises(HistoryInInitialize):
        algo.initialize()
def test_history_container_constructed_at_runtime(self):
    """The HistoryContainer should be lazily built during run(), not at
    algorithm construction, and sized exactly for the registered spec.
    """
    algo_text = dedent(
        """\
        from zipline.api import history
        def handle_data(context, data):
            context.prices = history(2, '1d', 'price')
        """
    )
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')
    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='daily'
    )
    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params
    )
    source = RandomWalkSource(start=start, end=end)
    # Not constructed yet: history() has not been called.
    self.assertIsNone(test_algo.history_container)
    test_algo.run(source)
    self.assertIsNotNone(
        test_algo.history_container,
        msg='HistoryContainer was not constructed at runtime',
    )
    container = test_algo.history_container
    # Buffer must hold a full day of minutes to service a '1d' spec.
    self.assertEqual(
        container.buffer_panel.window_length,
        Frequency.MAX_MINUTES['d'],
        msg='HistoryContainer.buffer_panel was not large enough to service'
        ' the given HistorySpec',
    )
    self.assertEqual(
        len(container.digest_panels),
        1,
        msg='The HistoryContainer created too many digest panels',
    )
    freq, digest = list(container.digest_panels.items())[0]
    self.assertEqual(
        freq.unit_str,
        'd',
    )
    # bar_count=2 needs only 1 digested bar plus the current bar.
    self.assertEqual(
        digest.window_length,
        1,
        msg='The digest panel is not large enough to service the given'
        ' HistorySpec',
    )
def test_bts_simulation_dt(self):
    """Verify that a clock emitting only a BEFORE_TRADING_START event
    advances the simulator's ``simulation_dt`` to that event's dt
    (metrics-tracker variant of the test).
    """
    # Minimal algo script: no handle_data; we only care about the clock.
    code = """
def initialize(context):
    pass
"""
    algo = TradingAlgorithm(
        script=code,
        sim_params=self.sim_params,
        env=self.env,
        metrics=metrics.load('none'),
    )

    # Wire up the tracker manually since we bypass TradingAlgorithm.run().
    algo.metrics_tracker = algo._create_metrics_tracker()
    benchmark_source = algo._create_benchmark_source()
    algo.metrics_tracker.handle_start_of_simulation(benchmark_source)

    dt = pd.Timestamp("2016-08-04 9:13:14", tz='US/Eastern')
    algo_simulator = AlgorithmSimulator(
        algo,
        self.sim_params,
        self.data_portal,
        BeforeTradingStartsOnlyClock(dt),
        benchmark_source,
        NoRestrictions(),
        None
    )

    # run through the algo's simulation
    list(algo_simulator.transform())

    # since the clock only ever emitted a single before_trading_start
    # event, we can check that the simulation_dt was properly set
    self.assertEqual(dt, algo_simulator.simulation_dt)
def test_history_passed_to_talib(self):
    """
    Had an issue where MagicMock was causing errors during
    validation with talib.

    We don't officially support a talib integration, yet.
    But using talib directly should work.
    """
    algo_text = """
import talib
import numpy as np

from zipline.api import history, add_history, record

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')

    ma_result = talib.MA(np.asarray(prices[0]), timeperiod=2)
    record(ma=ma_result[-1])
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30

    # Eddie: this was set to 04-10 but I don't see how that makes
    # sense as it does not generate enough data to get at -2 index
    # below.
    start = pd.Timestamp("2007-04-05", tz="UTC")
    end = pd.Timestamp("2007-04-10", tz="UTC")

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency="minute",
        emission_rate="daily",
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency="minute",
        sim_params=sim_params,
        env=TestHistoryAlgo.env
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    # At this point, just ensure that there is no crash.
    self.assertIsNotNone(output)

    # `.ix` was deprecated and removed from pandas; integer-row /
    # label-column access is spelled as a label select followed by a
    # positional select.
    recorded_ma = output["ma"].iloc[-2]

    self.assertFalse(pd.isnull(recorded_ma))
    # Depends on seed
    np.testing.assert_almost_equal(recorded_ma, 159.76304468946876)
def test_history_in_initialize(self):
    """history() is forbidden inside initialize(); expect the dedicated
    HistoryInInitialize exception."""
    algo_text = dedent(
        """\
        from zipline.api import history
        def initialize(context):
            history(10, '1d', 'price')

        def handle_data(context, data):
            pass
        """
    )

    start, end = (
        pd.Timestamp('2007-04-05', tz='UTC'),
        pd.Timestamp('2007-04-10', tz='UTC'),
    )

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='daily',
        env=self.env,
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=self.env,
    )

    with self.assertRaises(HistoryInInitialize):
        test_algo.initialize()
def test_minutely_fetcher(self):
    """End-to-end check of fetch_csv signal data at minutely emission:
    the recorded signal must forward-fill across sessions.
    """
    # Stub out the HTTP endpoint the algo's fetch_csv will hit.
    self.responses.add(
        self.responses.GET,
        'https://fake.urls.com/aapl_minute_csv_data.csv',
        body=AAPL_MINUTE_CSV_DATA,
        content_type='text/csv',
    )

    sim_params = factory.create_simulation_parameters(
        start=pd.Timestamp("2006-01-03", tz='UTC'),
        end=pd.Timestamp("2006-01-10", tz='UTC'),
        emission_rate="minute",
        data_frequency="minute"
    )

    test_algo = TradingAlgorithm(
        script="""
from zipline.api import fetch_csv, record, sid

def initialize(context):
    fetch_csv('https://fake.urls.com/aapl_minute_csv_data.csv')

def handle_data(context, data):
    record(aapl_signal=data.current(sid(24), "signal"))
""", sim_params=sim_params, data_frequency="minute", env=self.env)

    # manually setting data portal and getting generator because we need
    # the minutely emission packets here. TradingAlgorithm.run() only
    # returns daily packets.
    test_algo.data_portal = FetcherDataPortal(self.env,
                                              self.trading_calendar)
    gen = test_algo.get_generator()
    perf_packets = list(gen)

    signal = [result["minute_perf"]["recorded_vars"]["aapl_signal"] for
              result in perf_packets if "minute_perf" in result]

    # 6 trading sessions x 390 minutes per session.
    self.assertEqual(6 * 390, len(signal))

    # csv data is:
    # symbol,date,signal
    # aapl,1/4/06 5:31AM, 1
    # aapl,1/4/06 11:30AM, 2
    # aapl,1/5/06 5:31AM, 1
    # aapl,1/5/06 11:30AM, 3
    # aapl,1/9/06 5:31AM, 1
    # aapl,1/9/06 11:30AM, 4

    # dates 1/3 to 1/10
    # 2 signals per day, only last signal is taken. So we expect
    # 390 bars of signal NaN on 1/3
    # 390 bars of signal 2 on 1/4
    # 390 bars of signal 3 on 1/5
    # 390 bars of signal 3 on 1/6 (forward filled)
    # 390 bars of signal 4 on 1/9
    # 390 bars of signal 4 on 1/10 (forward filled)
    np.testing.assert_array_equal([np.NaN] * 390, signal[0:390])
    np.testing.assert_array_equal([2] * 390, signal[390:780])
    np.testing.assert_array_equal([3] * 780, signal[780:1560])
    np.testing.assert_array_equal([4] * 780, signal[1560:])
def test_history_daily_data_1m_window(self):
    """Requesting a '1m' history spec against a daily-data algorithm
    must raise IncompatibleHistoryFrequency."""
    script = """
from zipline.api import history, add_history

def initialize(context):
    add_history(bar_count=1, frequency='1m', field='price')

def handle_data(context, data):
    prices = history(bar_count=3, frequency='1d', field='price')
""".strip()

    window_start = pd.Timestamp('2006-03-20', tz='UTC')
    window_end = pd.Timestamp('2006-03-30', tz='UTC')

    params = factory.create_simulation_parameters(
        start=window_start, end=window_end)

    with self.assertRaises(IncompatibleHistoryFrequency):
        algo = TradingAlgorithm(
            script=script,
            data_frequency='daily',
            sim_params=params
        )
        bars = RandomWalkSource(start=window_start, end=window_end)
        algo.run(bars)
def get_results(self, algo_code):
    """Build a TradingAlgorithm from ``algo_code`` using the fixture's
    env/sim_params and return the result of running it."""
    algorithm = TradingAlgorithm(
        script=algo_code,
        env=self.env,
        sim_params=self.sim_params,
    )
    return algorithm.run(self.data_portal)
def test_history_passed_to_talib(self):
    """
    Had an issue where MagicMock was causing errors during
    validation with talib.

    We don't officially support a talib integration, yet.
    But using talib directly should work.
    """
    algo_text = """
import talib
import numpy as np

from zipline.api import history, add_history, record

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')

    ma_result = talib.MA(np.asarray(prices[0]), timeperiod=2)
    record(ma=ma_result[-1])
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30

    # Eddie: this was set to 04-10 but I don't see how that makes
    # sense as it does not generate enough data to get at -2 index
    # below.
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(period_start=start,
                                      period_end=end,
                                      capital_base=float("1.0e5"),
                                      data_frequency='minute',
                                      emission_rate='daily')

    test_algo = TradingAlgorithm(script=algo_text,
                                 data_frequency='minute',
                                 sim_params=sim_params)

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    # At this point, just ensure that there is no crash.
    self.assertIsNotNone(output)

    # `.ix` was deprecated and removed from pandas; select the column
    # by label, then the row positionally.
    recorded_ma = output['ma'].iloc[-2]

    self.assertFalse(pd.isnull(recorded_ma))
    # Depends on seed
    np.testing.assert_almost_equal(recorded_ma, 159.76304468946876)
def run_algo(self, code, sim_params=None, data_frequency="daily"):
    """Run ``code`` against a FetcherDataPortal and return the results.

    Falls back to the fixture's sim_params when none are given.
    """
    params = self.sim_params if sim_params is None else sim_params

    algo = TradingAlgorithm(
        script=code,
        sim_params=params,
        env=self.env,
        data_frequency=data_frequency,
    )
    return algo.run(FetcherDataPortal(self.env))
def test_history_container_constructed_at_runtime(self):
    """The HistoryContainer should be lazily built during run(), not at
    algorithm construction, and sized exactly for the registered spec.
    """
    algo_text = dedent("""\
        from zipline.api import history
        def handle_data(context, data):
            context.prices = history(2, '1d', 'price')
        """)
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')
    sim_params = SimulationParameters(period_start=start,
                                      period_end=end,
                                      capital_base=float("1.0e5"),
                                      data_frequency='minute',
                                      emission_rate='daily')
    test_algo = TradingAlgorithm(script=algo_text,
                                 data_frequency='minute',
                                 sim_params=sim_params)
    source = RandomWalkSource(start=start, end=end)
    # Not constructed yet: history() has not been called.
    self.assertIsNone(test_algo.history_container)
    test_algo.run(source)
    self.assertIsNotNone(
        test_algo.history_container,
        msg='HistoryContainer was not constructed at runtime',
    )
    container = test_algo.history_container
    # Buffer must hold a full day of minutes to service a '1d' spec.
    self.assertEqual(
        container.buffer_panel.window_length,
        Frequency.MAX_MINUTES['d'],
        msg='HistoryContainer.buffer_panel was not large enough to service'
        ' the given HistorySpec',
    )
    self.assertEqual(
        len(container.digest_panels),
        1,
        msg='The HistoryContainer created too many digest panels',
    )
    freq, digest = list(container.digest_panels.items())[0]
    self.assertEqual(
        freq.unit_str,
        'd',
    )
    # bar_count=2 needs only 1 digested bar plus the current bar.
    self.assertEqual(
        digest.window_length,
        1,
        msg='The digest panel is not large enough to service the given'
        ' HistorySpec',
    )
def test_basic_history_positional_args(self):
    """
    Ensure that positional args work.
    """
    algo_text = """
import copy
from zipline.api import history, add_history

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')
    context.last_prices = copy.deepcopy(prices)
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-21', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    last_prices = test_algo.last_prices[0]
    oldest_dt = pd.Timestamp(
        '2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    newest_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')

    # assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the canonical spelling.
    self.assertEqual(oldest_dt, last_prices.index[0])
    self.assertEqual(newest_dt, last_prices.index[-1])

    # Random values, depend on the walk's seed.
    self.assertEqual(139.36946942498648, last_prices[oldest_dt])
    self.assertEqual(180.15661995395106, last_prices[newest_dt])
def test_basic_history_one_day(self):
    """With a 1-bar daily history spec, the oldest and newest bars of
    the returned frame are the same (current) session close."""
    algo_text = """
from zipline.api import history, add_history

def initialize(context):
    add_history(bar_count=1, frequency='1d', field='price')

def handle_data(context, data):
    prices = history(bar_count=1, frequency='1d', field='price')
    context.last_prices = prices
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-21', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    last_prices = test_algo.last_prices[0]
    # oldest and newest should be the same if there is only 1 bar
    oldest_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    newest_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')

    # assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the canonical spelling.
    self.assertEqual(oldest_dt, last_prices.index[0])
    self.assertEqual(newest_dt, last_prices.index[-1])

    # Random, depends on seed
    self.assertEqual(180.15661995395106, last_prices[oldest_dt])
    self.assertEqual(180.15661995395106, last_prices[newest_dt])
def test_current_contract_in_algo(self):
    """Check that data.current(cf, 'contract') rolls the primary and
    secondary continuous futures on the expected sessions.
    """
    code = dedent("""
from zipline.api import (
    record,
    continuous_future,
    schedule_function,
    get_datetime,
)

def initialize(algo):
    algo.primary_cl = continuous_future('FO', 0, 'calendar')
    algo.secondary_cl = continuous_future('FO', 1, 'calendar')
    schedule_function(record_current_contract)

def record_current_contract(algo, data):
    record(datetime=get_datetime())
    record(primary=data.current(algo.primary_cl, 'contract'))
    record(secondary=data.current(algo.secondary_cl, 'contract'))
""")
    algo = TradingAlgorithm(script=code,
                            sim_params=self.sim_params,
                            trading_calendar=self.trading_calendar,
                            env=self.env)
    results = algo.run(self.data_portal)

    result = results.iloc[0]

    self.assertEqual(result.primary.symbol,
                     'FOF16',
                     'Primary should be FOF16 on first session.')

    self.assertEqual(result.secondary.symbol,
                     'FOG16',
                     'Secondary should be FOG16 on first session.')

    result = results.iloc[1]

    # Second day, primary should switch to FOG
    self.assertEqual(result.primary.symbol,
                     'FOG16',
                     'Primary should be FOG16 on second session, auto '
                     'close is at beginning of the session.')

    self.assertEqual(result.secondary.symbol,
                     'FOH16',
                     'Secondary should be FOH16 on second session, auto '
                     'close is at beginning of the session.')

    result = results.iloc[2]

    # Third day, no further roll: contracts stay FOG/FOH
    self.assertEqual(result.primary.symbol,
                     'FOG16',
                     'Primary should remain as FOG16 on third session.')

    self.assertEqual(result.secondary.symbol,
                     'FOH16',
                     'Secondary should remain as FOH16 on third session.')
def test_history_daily(self):
    """Each handle_data history() call at daily frequency should return
    a frame equal to the trailing ``bar_count`` rows of the source df.
    """
    bar_count = 3
    algo_text = """
from zipline.api import history, add_history

def initialize(context):
    add_history(bar_count={bar_count}, frequency='1d', field='price')
    context.history_trace = []

def handle_data(context, data):
    prices = history(bar_count={bar_count}, frequency='1d', field='price')
    context.history_trace.append(prices)
""".format(bar_count=bar_count).strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-30', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end, data_frequency='daily', env=self.env,
    )

    _, df = factory.create_test_df_source(sim_params, self.env)
    df = df.astype(np.float64)
    source = DataFrameSource(df)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='daily',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )

    output = test_algo.run(source)
    self.assertIsNotNone(output)

    # Map sid column labels to Asset objects so the frames compare equal.
    df.columns = self.env.asset_finder.retrieve_all(df.columns)

    # Skip the warm-up bars: the first full window starts at index
    # bar_count - 1.
    for i, received in enumerate(test_algo.history_trace[bar_count - 1:]):
        expected = df.iloc[i:i + bar_count]
        assert_frame_equal(expected, received)
def test_history_passed_to_func(self):
    """
    Had an issue where MagicMock was causing errors during
    validation with rolling mean.
    """
    # NOTE(review): pd.rolling_mean was removed in pandas 0.23+; this
    # test assumes an old pandas — confirm the pinned version.
    algo_text = """
from zipline.api import history, add_history
import pandas as pd

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')
    pd.rolling_mean(prices, 2)
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30
    start = pd.Timestamp('2007-04-10', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='minute'
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)

    # At this point, just ensure that there is no crash.
    self.assertIsNotNone(output)
def main():
    """Generate a .pyi stub file next to the zipline.api module listing
    every registered algorithm API function with its signature and
    docstring.
    """
    # rstrip('c') maps a cached 'api.pyc' path back to 'api.py'; the
    # appended 'i' yields the stub filename 'api.pyi'.
    with open(api.__file__.rstrip('c') + 'i', 'w') as stub:
        # Imports so that Asset et al can be resolved.
        # "from MOD import *" will re-export the imports from the stub, so
        # explicitly importing.
        stub.write(dedent("""\
        from zipline.assets import Asset, Equity, Future
        from zipline.assets.futures import FutureChain
        from zipline.finance.cancel_policy import CancelPolicy
        from zipline.pipeline import Pipeline
        from zipline.protocol import Order
        from zipline.utils.events import EventRule
        """))
        # Sort to generate consistent stub file:
        for api_func in sorted(TradingAlgorithm.all_api_methods(),
                               key=attrgetter('__name__')):
            # NOTE(review): inspect._signature_bound_method is a private
            # CPython helper (drops the leading `self`) — may break on
            # newer Python versions.
            sig = inspect._signature_bound_method(inspect.signature(api_func))

            indent = ' ' * 4
            stub.write(dedent('''\
            def {func_name}{func_sig}:
                """'''.format(func_name=api_func.__name__,
                              func_sig=sig)))
            stub.write(dedent('{indent}{func_doc}'.format(
                func_doc=api_func.__doc__ or '\n',  # handle None docstring
                indent=indent,
            )))
            stub.write('{indent}"""\n\n'.format(indent=indent))
def main():
    """Generate a .pyi stub file next to the zipline.api module listing
    every registered algorithm API function with its signature and
    docstring.
    """
    # rstrip('c') maps a cached 'api.pyc' path back to 'api.py'; the
    # appended 'i' yields the stub filename 'api.pyi'.
    with open(api.__file__.rstrip('c') + 'i', 'w') as stub:
        # Imports so that Asset et al can be resolved.
        # "from MOD import *" will re-export the imports from the stub, so
        # explicitly importing.
        stub.write(
            dedent("""\
            from zipline.assets import Asset, Equity, Future
            from zipline.assets.futures import FutureChain
            from zipline.finance.cancel_policy import CancelPolicy
            from zipline.pipeline import Pipeline
            from zipline.protocol import Order
            from zipline.utils.events import EventRule
            """))
        # Sort to generate consistent stub file:
        for api_func in sorted(TradingAlgorithm.all_api_methods(),
                               key=attrgetter('__name__')):
            # NOTE(review): inspect._signature_bound_method is a private
            # CPython helper (drops the leading `self`) — may break on
            # newer Python versions.
            sig = inspect._signature_bound_method(inspect.signature(api_func))

            indent = ' ' * 4
            stub.write(
                dedent('''\
                def {func_name}{func_sig}:
                    """'''.format(func_name=api_func.__name__,
                                  func_sig=sig)))
            stub.write(
                dedent('{indent}{func_doc}'.format(
                    func_doc=api_func.__doc__ or '\n',  # handle None docstring
                    indent=indent,
                )))
            stub.write('{indent}"""\n\n'.format(indent=indent))
def run_algo_single(**algo_descr):
    """Run one algorithm described by ``algo_descr`` and apply its
    objective function.

    Expected keys: initialize, handle_data, param_set, data; optional:
    constraint_func, objective ('none' | 'sharpe' | 'total_return' | a
    callable).

    Returns np.nan when the constraint rejects the parameter set or an
    ImportError occurs; otherwise the objective value.
    """
    if 'constraint_func' in algo_descr:
        if algo_descr['constraint_func'](algo_descr['param_set']):
            return np.nan

    try:
        algo = TradingAlgorithm(initialize=algo_descr['initialize'],
                                handle_data=algo_descr['handle_data'],
                                **algo_descr['param_set'])
        perf = algo.run(algo_descr['data'])
        daily_rets = perf.portfolio_value.pct_change().dropna()
        if daily_rets.std() > 0:
            # Annualized Sharpe (252 trading days).
            sharpe_ratio_calc = (daily_rets.mean() / daily_rets.std()
                                 * np.sqrt(252))
        else:
            # Sentinel for a flat (zero-variance) equity curve.
            sharpe_ratio_calc = -999
        risk_report = algo.risk_report
        risk_cum = pd.Series(
            algo.perf_tracker.cumulative_risk_metrics.to_dict())
    except ImportError as e:
        print(e)
        return np.nan

    # Apply objective functions
    objective = algo_descr.get('objective', 'none')
    if objective == 'none':
        obj = (perf, risk_cum, risk_report)
    elif objective == 'sharpe':
        obj = sharpe_ratio_calc
    elif objective == 'total_return':
        obj = perf['portfolio_value'][-1] / perf['portfolio_value'][0] - 1
    elif callable(objective):
        obj = objective(perf, risk_cum, risk_report)
    else:
        # NotImplemented is a comparison sentinel, not an exception;
        # raising it is itself a TypeError on Python 3.
        raise NotImplementedError('Objective %s not implemented.'
                                  % algo_descr['objective'])

    # print() call instead of the Python 2 print statement.
    print("Sharpe: " + str(sharpe_ratio_calc) + " %_Return: " + str(
        perf.portfolio_value[-1] / perf.portfolio_value[0] - 1) +
        " MaxDD: " + str(perf.max_drawdown[-1]) + " MaxExp: " + str(
        perf.max_leverage[-1]))

    return obj
def test_history_grow_length_intra_bar(self, incr):
    """
    Tests growing the length of a digest panel with different
    date_buf deltas in a single bar.
    """
    # The scripted algo grows its requested bar_count by ``incr`` between
    # two history() calls inside the same handle_data invocation, and
    # asserts (via the injected test_case) that each call returns a
    # frame of the requested length.
    algo_text = dedent(
        """\
        from zipline.api import history
        def initialize(context):
            context.bar_count = 1

        def handle_data(context, data):
            prices = history(context.bar_count, '1d', 'price')
            context.test_case.assertEqual(len(prices), context.bar_count)
            context.bar_count += {incr}
            prices = history(context.bar_count, '1d', 'price')
            context.test_case.assertEqual(len(prices), context.bar_count)
        """
    ).format(incr=incr)
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='daily',
        env=self.env,
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=self.env,
    )
    # Expose this TestCase to the scripted algo's assertions.
    test_algo.test_case = self

    source = RandomWalkSource(start=start, end=end)

    self.assertIsNone(test_algo.history_container)
    test_algo.run(source)
def create_algo(self, code, filename=None, sim_params=None):
    """Construct (without running) a TradingAlgorithm for ``code``,
    defaulting to the fixture's sim_params."""
    params = self.sim_params if sim_params is None else sim_params
    return TradingAlgorithm(
        script=code,
        sim_params=params,
        env=self.env,
        algo_filename=filename,
    )
def test_history_with_open(self):
    """history() on the 'open_price' field should record the session's
    open without crashing; value is fixed by the walk's seed."""
    algo_text = """
from zipline.api import history, add_history, record

def initialize(context):
    add_history(3, '1d', 'open_price')

def handle_data(context, data):
    opens = history(3, '1d', 'open_price')

    record(current_open=opens[0].ix[-1])
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30
    start = pd.Timestamp('2007-04-10', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='minute'
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)

    # `.ix` was deprecated and removed from pandas; select the column by
    # label, then the first row positionally.
    np.testing.assert_equal(output['current_open'].iloc[0],
                            99.991436939669939)
def test_history_with_volume(self):
    """history() on the 'volume' field should record the session volume
    without crashing; value is fixed by the walk's seed."""
    algo_text = """
from zipline.api import history, add_history, record

def initialize(context):
    add_history(3, '1d', 'volume')

def handle_data(context, data):
    volume = history(3, '1d', 'volume')

    record(current_volume=volume[0].ix[-1])
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30
    start = pd.Timestamp('2007-04-10', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='minute'
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)

    # `.ix` was deprecated and removed from pandas; select the column by
    # label, then the first row positionally.
    np.testing.assert_equal(output['current_volume'].iloc[0],
                            212218404.0)
def markowitz(stocks, cash):
    """Run the Markowitz backtest over the last ~50 business days and
    build HTML/PNG plots.

    Returns a tuple: (results, returns_plot, price_plot, frontier_plot,
    equalweight_returns, optimal_returns).
    """
    warnings.filterwarnings("once")
    solvers.options['show_progress'] = False

    end = pd.Timestamp.utcnow()
    start = end - 50 * pd.tseries.offsets.BDay()

    data = load_bars_from_yahoo(stocks=stocks,
                                start=start,
                                end=end)

    # Instantiate algorithm
    algo = TradingAlgorithm(initialize=initialize,
                            handle_data=handle_data,
                            cash=cash)
    # Run algorithm
    results = algo.run(data)

    # portfolio value plot
    raw_plot = results.portfolio_value.plot()
    raw_fig = raw_plot.get_figure()
    returns_plot = mpld3.fig_to_html(raw_fig)
    raw_fig.clf()

    # stock price plot
    raw_price_data = data.loc[:, :, 'price'].pct_change(1).fillna(0).applymap(
        lambda x: x + 1).cumprod().applymap(lambda x: x * 100)
    raw_price_plot = raw_price_data.plot(figsize=(8, 5))
    raw_price_fig = raw_price_plot.get_figure()
    price_plot = mpld3.fig_to_html(raw_price_fig)
    raw_price_fig.clf()

    # final returns
    # list(map(...)) because map() is a lazy iterator on Python 3 and
    # cannot be indexed directly.
    equalweight_returns = (
        sum(list(map(list, raw_price_data.tail(1).values))[0]) / 4 - 100)
    equalweight_returns = '{0:.2f}%'.format(float(equalweight_returns))
    optimal_returns = (results.portfolio_value.tail(1).iloc[0] - 100000) / 1000
    optimal_returns = '{0:.2f}%'.format(float(optimal_returns))

    # efficient frontier plot — use a context manager so the file handle
    # is closed instead of leaked.
    with open("plot.png", "rb") as f:
        frontier_plot_data = f.read()
    # serialize to HTTP response
    frontier_plot = HttpResponse(frontier_plot_data,
                                 content_type="image/png")

    return (results, returns_plot, price_plot, frontier_plot,
            equalweight_returns, optimal_returns)
def test_basic_history(self):
    """Basic 2-bar daily history: index endpoints and seeded values
    should match the random-walk source."""
    algo_text = """
from zipline.api import history, add_history

def initialize(context):
    add_history(bar_count=2, frequency='1d', field='price')

def handle_data(context, data):
    prices = history(bar_count=2, frequency='1d', field='price')
    prices['prices_times_two'] = prices[1] * 2
    context.last_prices = prices
""".strip()

    #      March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp("2006-03-20", tz="UTC")
    end = pd.Timestamp("2006-03-21", tz="UTC")

    sim_params = factory.create_simulation_parameters(start=start, end=end)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency="minute",
        sim_params=sim_params,
        env=TestHistoryAlgo.env
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    last_prices = test_algo.last_prices[0]
    oldest_dt = pd.Timestamp("2006-03-20 4:00 PM",
                             tz="US/Eastern").tz_convert("UTC")
    newest_dt = pd.Timestamp("2006-03-21 4:00 PM",
                             tz="US/Eastern").tz_convert("UTC")

    # assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the canonical spelling.
    self.assertEqual(oldest_dt, last_prices.index[0])
    self.assertEqual(newest_dt, last_prices.index[-1])

    # Random, depends on seed
    self.assertEqual(139.36946942498648, last_prices[oldest_dt])
    self.assertEqual(180.15661995395106, last_prices[newest_dt])
def __init__(self, model, pre_defined_assets, equity_data, other_data,
             training_strategy, pre_trained_model_path=None,
             name='backtest', log_interval=1, transaction_cost=0.005,
             *args, **kwargs):
    """Backtesting algorithm wrapping a trainable model.

    Parameters
    ----------
    model : object exposing init_model/load_model/get_session
    pre_defined_assets : assets traded by the algorithm
    equity_data, other_data : training data inputs
    training_strategy : strategy used to (re)train the model
    pre_trained_model_path : optional path; when given the model is
        loaded from disk instead of freshly initialized
    name : run name, used for the TensorBoard log directory
    log_interval : how often (in days) to log
    transaction_cost : per-trade cost fraction
    Remaining args/kwargs are forwarded to TradingAlgorithm.
    """
    TradingAlgorithm.__init__(self, *args, **kwargs)
    self.model = model
    self.assets = pre_defined_assets
    self.transaction_cost = transaction_cost
    self.training_strategy = training_strategy
    self.other_training_data = other_data
    self.equity_data = equity_data
    self.log_dir = 'log/' + name
    self.log_interval = log_interval
    self.real_return = []
    self.history_weight = []
    # `is None`, not `== None`: identity check against the sentinel
    # avoids surprises from custom __eq__ implementations.
    if pre_trained_model_path is None:
        self.model.init_model()
    else:
        self.model.load_model(pre_trained_model_path)
    self.day = 1
    self.backtest_action_record = []
    self.tensorboard = TensorBoard(log_dir=self.log_dir,
                                   session=self.model.get_session())
def markowitz(stocks, cash):
    """Run the Markowitz backtest over the last ~50 business days and
    build HTML/PNG plots.

    Returns a tuple: (results, returns_plot, price_plot, frontier_plot,
    equalweight_returns, optimal_returns).
    """
    warnings.filterwarnings("once")
    solvers.options['show_progress'] = False

    end = pd.Timestamp.utcnow()
    start = end - 50 * pd.tseries.offsets.BDay()

    data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)

    # Instantiate algorithm
    algo = TradingAlgorithm(initialize=initialize,
                            handle_data=handle_data,
                            cash=cash)
    # Run algorithm
    results = algo.run(data)

    # portfolio value plot
    raw_plot = results.portfolio_value.plot()
    raw_fig = raw_plot.get_figure()
    returns_plot = mpld3.fig_to_html(raw_fig)
    raw_fig.clf()

    # stock price plot
    raw_price_data = data.loc[:, :, 'price'].pct_change(1).fillna(0).applymap(
        lambda x: x + 1).cumprod().applymap(lambda x: x * 100)
    raw_price_plot = raw_price_data.plot(figsize=(8, 5))
    raw_price_fig = raw_price_plot.get_figure()
    price_plot = mpld3.fig_to_html(raw_price_fig)
    raw_price_fig.clf()

    # final returns
    # list(map(...)) because map() is a lazy iterator on Python 3 and
    # cannot be indexed directly.
    equalweight_returns = (
        sum(list(map(list, raw_price_data.tail(1).values))[0]) / 4 - 100)
    equalweight_returns = '{0:.2f}%'.format(float(equalweight_returns))
    optimal_returns = (results.portfolio_value.tail(1).iloc[0] - 100000) / 1000
    optimal_returns = '{0:.2f}%'.format(float(optimal_returns))

    # efficient frontier plot — use a context manager so the file handle
    # is closed instead of leaked.
    with open("plot.png", "rb") as f:
        frontier_plot_data = f.read()
    # serialize to HTTP response
    frontier_plot = HttpResponse(frontier_plot_data,
                                 content_type="image/png")

    return (results, returns_plot, price_plot, frontier_plot,
            equalweight_returns, optimal_returns)
def run_algo_single(**algo_descr):
    """Run one algorithm described by ``algo_descr`` and apply its
    objective function.

    Expected keys: initialize, handle_data, param_set, data; optional:
    constraint_func, objective ('none' | 'sharpe' | 'total_return' | a
    callable).

    Returns np.nan when the constraint rejects the parameter set or an
    ImportError occurs; otherwise the objective value.
    """
    if 'constraint_func' in algo_descr:
        if algo_descr['constraint_func'](algo_descr['param_set']):
            return np.nan

    try:
        algo = TradingAlgorithm(initialize=algo_descr['initialize'],
                                handle_data=algo_descr['handle_data'],
                                **algo_descr['param_set']
                                )
        perf = algo.run(algo_descr['data'])
        daily_rets = perf.portfolio_value.pct_change().dropna()
        if daily_rets.std() > 0:
            # Annualized Sharpe (252 trading days).
            sharpe_ratio_calc = (daily_rets.mean() / daily_rets.std()
                                 * np.sqrt(252))
        else:
            # Sentinel for a flat (zero-variance) equity curve.
            sharpe_ratio_calc = -999
        risk_report = algo.risk_report
        risk_cum = pd.Series(
            algo.perf_tracker.cumulative_risk_metrics.to_dict())
    except ImportError as e:
        print(e)
        return np.nan

    # Apply objective functions
    objective = algo_descr.get('objective', 'none')
    if objective == 'none':
        obj = (perf, risk_cum, risk_report)
    elif objective == 'sharpe':
        obj = sharpe_ratio_calc
    elif objective == 'total_return':
        obj = perf['portfolio_value'][-1] / perf['portfolio_value'][0] - 1
    elif callable(objective):
        obj = objective(perf, risk_cum, risk_report)
    else:
        # NotImplemented is a comparison sentinel, not an exception;
        # raising it is itself a TypeError on Python 3.
        raise NotImplementedError('Objective %s not implemented.'
                                  % algo_descr['objective'])

    # print() call instead of the Python 2 print statement.
    print("Sharpe: " + str(sharpe_ratio_calc) + " %_Return: " + str(
        perf.portfolio_value[-1] / perf.portfolio_value[0] - 1) +
        " MaxDD: " + str(perf.max_drawdown[-1]) + " MaxExp: " + str(
        perf.max_leverage[-1]))

    return obj
def test_history_with_high(self):
    """history() on the 'high' field should record the session high
    without crashing; value is fixed by the walk's seed."""
    algo_text = """
from zipline.api import history, add_history, record

def initialize(context):
    add_history(3, '1d', 'high')

def handle_data(context, data):
    highs = history(3, '1d', 'high')

    record(current_high=highs[0].ix[-1])
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30
    start = pd.Timestamp("2007-04-10", tz="UTC")
    end = pd.Timestamp("2007-04-10", tz="UTC")

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency="minute",
        emission_rate="minute",
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency="minute",
        sim_params=sim_params,
        env=TestHistoryAlgo.env
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)

    # `.ix` was deprecated and removed from pandas; select the column by
    # label, then the first row positionally.
    np.testing.assert_equal(output["current_high"].iloc[0],
                            139.5370641791925)
def test_history_container_constructed_at_runtime(self, data_freq):
    """Parametrized over data frequency: the HistoryContainer should be
    lazily built during run() and sized for the registered spec.
    """
    algo_text = dedent(
        """\
        from zipline.api import history
        def handle_data(context, data):
            context.prices = history(2, '1d', 'price')
        """
    )
    start = pd.Timestamp("2007-04-05", tz="UTC")
    end = pd.Timestamp("2007-04-10", tz="UTC")

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency=data_freq,
        emission_rate=data_freq,
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency=data_freq,
        sim_params=sim_params,
        env=TestHistoryAlgo.env
    )

    source = RandomWalkSource(start=start, end=end, freq=data_freq)

    # Not constructed yet: history() has not been called.
    self.assertIsNone(test_algo.history_container)
    test_algo.run(source)
    self.assertIsNotNone(
        test_algo.history_container,
        msg="HistoryContainer was not constructed at runtime")

    container = test_algo.history_container
    self.assertEqual(
        len(container.digest_panels),
        1,
        msg="The HistoryContainer created too many digest panels")

    freq, digest = list(container.digest_panels.items())[0]
    self.assertEqual(freq.unit_str, "d")
    # bar_count=2 needs only 1 digested bar plus the current bar.
    self.assertEqual(
        digest.window_length,
        1,
        msg="The digest panel is not large enough to service the given"
        " HistorySpec"
    )
def main():
    """Generate a ``.pyi`` stub file next to ``zipline.api`` describing
    every registered algorithm API function.

    The stub starts with explicit imports (so the referenced types
    resolve and are re-exported by ``from MOD import *``), followed by
    one signature + docstring entry per API method, sorted by name so
    the output is deterministic.
    """
    # api.__file__ may end in ".pyc"; rstrip("c") + "i" maps both
    # ".py" and ".pyc" to ".pyi".
    with open(api.__file__.rstrip("c") + "i", "w") as stub:
        # Imports so that Asset et al can be resolved.
        # "from MOD import *" will re-export the imports from the stub, so
        # explicitly importing.
        stub.write(
            dedent(
                """\
                import collections
                from zipline.assets import Asset, Equity, Future
                from zipline.assets.futures import FutureChain
                from zipline.finance.asset_restrictions import Restrictions
                from zipline.finance.cancel_policy import CancelPolicy
                from zipline.pipeline import Pipeline
                from zipline.protocol import Order
                from zipline.utils.events import EventRule
                from zipline.utils.security_list import SecurityList
                """
            )
        )
        # Sort to generate consistent stub file:
        for api_func in sorted(
            TradingAlgorithm.all_api_methods(),
            key=attrgetter("__name__"),
        ):
            stub.write("\n")
            # NOTE(review): inspect._signature_bound_method is a private
            # CPython helper (it drops the leading ``self`` parameter);
            # it may break on a future Python upgrade.
            sig = inspect._signature_bound_method(inspect.signature(api_func))

            indent = " " * 4
            stub.write(
                dedent(
                    '''\
                    def {func_name}{func_sig}:
                        """'''.format(func_name=api_func.__name__,
                                      func_sig=sig)
                )
            )
            # BUG FIX: the original evaluated api_func.__doc__.lstrip()
            # unconditionally, raising AttributeError for an API function
            # with no docstring -- the trailing `or "\n"` fallback could
            # never be reached in that case. Guard the None docstring
            # explicitly instead.
            doc = api_func.__doc__
            stub.write(
                dedent(
                    "{indent}{func_doc}".format(
                        func_doc=dedent(doc.lstrip()) if doc else "\n",
                        indent=indent,
                    )
                )
            )
            stub.write('{indent}"""\n'.format(indent=indent))
def startbacktest():
    """Run a user-submitted backtest script and return results as JSON.

    Query-string parameters:
        starttime / endtime -- simulation date range
        code                -- zipline algorithm source to execute

    NOTE(review): the captured stdout/stderr are only printed to the
    server console, and the returned JSON is always an empty list --
    presumably still a work in progress.
    """
    starttime = request.args.get('starttime')
    endtime = request.args.get('endtime')
    code = request.args.get('code')
    # Process the submitted code: capture everything the algorithm
    # prints so it can be inspected after the run.
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = mystdout = StringIO()
    sys.stderr = mystderr = StringIO()
    try:
        algo = TradingAlgorithm(script=code, startdate=starttime,
                                enddate=endtime, capital_base=10000,
                                benchmark='sz399004')
        results = algo.run(input_data)
        print results
    except Exception as error:
        # Report but swallow any failure from the user-supplied script.
        print('caught this error: ' + repr(error))
    # Restore the real streams before echoing the captured output.
    sys.stdout = old_stdout
    sys.stderr = old_stderr
    print mystdout.getvalue()
    print mystderr.getvalue()
    # TODO(review): populate json_results from `results`.
    json_results = []
    return toJson(json_results)
def test_current_chain_in_algo(self):
    """Verify data.current_chain() for primary/secondary continuous
    futures across the first two sessions: chain length, front contract
    and last contract, including the roll between sessions."""
    code = dedent("""
        from zipline.api import (
            record,
            continuous_future,
            schedule_function,
            get_datetime,
        )

        def initialize(algo):
            algo.primary_cl = continuous_future('FO', 0, 'calendar')
            algo.secondary_cl = continuous_future('FO', 1, 'calendar')
            schedule_function(record_current_contract)

        def record_current_contract(algo, data):
            record(datetime=get_datetime())
            primary_chain = data.current_chain(algo.primary_cl)
            secondary_chain = data.current_chain(algo.secondary_cl)
            record(primary_len=len(primary_chain))
            record(primary_first=primary_chain[0].symbol)
            record(primary_last=primary_chain[-1].symbol)
            record(secondary_len=len(secondary_chain))
            record(secondary_first=secondary_chain[0].symbol)
            record(secondary_last=secondary_chain[-1].symbol)
    """)
    algo = TradingAlgorithm(script=code,
                            sim_params=self.sim_params,
                            trading_calendar=self.trading_calendar,
                            env=self.env)
    results = algo.run(self.data_portal)

    # First session.
    result = results.iloc[0]
    self.assertEqual(result.primary_len,
                     4,
                     'There should be only 4 contracts in the chain for '
                     'the primary, there are 5 contracts defined in the '
                     'fixture, but one has a start after the simulation '
                     'date.')
    self.assertEqual(result.secondary_len,
                     3,
                     'There should be only 3 contracts in the chain for '
                     'the primary, there are 5 contracts defined in the '
                     'fixture, but one has a start after the simulation '
                     'date. And the first is not included because it is '
                     'the primary on that date.')
    self.assertEqual(result.primary_first,
                     'FOF16',
                     'Front of primary chain should be FOF16 on first '
                     'session.')
    self.assertEqual(result.secondary_first,
                     'FOG16',
                     'Front of secondary chain should be FOG16 on first '
                     'session.')
    self.assertEqual(result.primary_last,
                     'FOJ16',
                     'End of primary chain should be FOJ16 on first '
                     'session.')
    self.assertEqual(result.secondary_last,
                     'FOJ16',
                     'End of secondary chain should be FOJ16 on first '
                     'session.')

    # Second day, primary should switch to FOG
    result = results.iloc[1]
    self.assertEqual(result.primary_len,
                     3,
                     'There should be only 3 contracts in the chain for '
                     'the primary, there are 5 contracts defined in the '
                     'fixture, but one has a start after the simulation '
                     'date. The first is not included because of roll.')
    self.assertEqual(result.secondary_len,
                     2,
                     'There should be only 2 contracts in the chain for '
                     'the primary, there are 5 contracts defined in the '
                     'fixture, but one has a start after the simulation '
                     'date. The first is not included because of roll, '
                     'the second is the primary on that date.')
    self.assertEqual(result.primary_first,
                     'FOG16',
                     'Front of primary chain should be FOG16 on second '
                     'session.')
    self.assertEqual(result.secondary_first,
                     'FOH16',
                     'Front of secondary chain should be FOH16 on second '
                     'session.')

    # These values remain FOJ16 because fixture data is not exhaustive
    # enough to move the end of the chain.
    self.assertEqual(result.primary_last,
                     'FOJ16',
                     'End of primary chain should be FOJ16 on second '
                     'session.')
    self.assertEqual(result.secondary_last,
                     'FOJ16',
                     'End of secondary chain should be FOJ16 on second '
                     'session.')
from datetime import datetime

import pytz

from zipline import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
from zipline.api import order


def initialize(context):
    """Stash a sentinel value on the context before the run starts."""
    context.test = 10


def handle_date(context, data):
    """Buy 10 AAPL shares every bar and echo the stored sentinel."""
    order('AAPL', 10)
    print(context.test)


if __name__ == '__main__':
    import pylab as pl

    # Two-year simulation window, 2008 through end of 2009.
    begin = datetime(2008, 1, 1, 0, 0, 0, 0, pytz.utc)
    finish = datetime(2010, 1, 1, 0, 0, 0, 0, pytz.utc)
    prices = load_from_yahoo(stocks=['AAPL'], indexes={},
                             start=begin, end=finish).dropna()

    algo = TradingAlgorithm(initialize=initialize, handle_data=handle_date)
    perf = algo.run(prices)

    perf.portfolio_value.plot()
    pl.show()
def start_algo3(data):
    """Run the module-level initialize/handle_data strategy over *data*.

    Returns a tuple of (backtest results, cumulative risk metrics).
    """
    algorithm = TradingAlgorithm(initialize=initialize,
                                 handle_data=handle_data)
    performance = algorithm.run(data)
    return performance, algorithm.perf_tracker.cumulative_risk_metrics
from zipline import TradingAlgorithm
from zipline.api import order, sid
from zipline.data.loader import load_bars_from_yahoo

# creating time interval
start = pd.Timestamp('2008-01-01', tz='UTC')
end = pd.Timestamp('2013-01-01', tz='UTC')

# loading the data for the two assets we trade
input_data = load_bars_from_yahoo(
    stocks=['AAPL', 'MSFT'],
    start=start,
    end=end,
)


def initialize(context):
    """One-shot flag: the strategy places orders exactly once."""
    context.has_ordered = False


def handle_data(context, data):
    """On the first bar only, buy 100 shares of every asset seen."""
    if context.has_ordered:
        return
    for stock in data:
        order(sid(stock), 100)
    context.has_ordered = True


algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(input_data)
# NOTE(review): fragment -- the try-block below reads names
# (returns_6m, returns_60d, minimize_vol, order_target_percent) that
# are defined in an enclosing rebalance function not visible here.
try:
    # Get the strongest 5 in momentum
    mom = returns_6m.T.sum(axis=1)
    # Keep only the positive-momentum half of the universe.
    selected_indices = mom[mom>0].order().tail(len(mom) /2).index
    # selected_indices = mom.index
    # selected_indices = mom[mom > 0 ].index
    selected_returns = returns_60d[selected_indices]

    # Minimum-volatility weights over the selected returns.
    weights = minimize_vol(selected_returns.T)
    # weights = minimize_vol(returns_60d.T)

    # Rebalance portfolio accordingly
    for stock, weight in zip(selected_returns.columns, weights):
        order_target_percent(stock, weight)
except :
    # Sometimes this error is thrown
    # ValueError: Rank(A) < p or Rank([P; A; G]) < n
    # NOTE(review): the bare except silently swallows *every* failure,
    # not just the rank error above.
    pass

# Instantiate algorithm
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
# Run algorithm
results = algo.run(dp.dropna())

# Collect the equity curve for plotting.
ret_ports = pd.DataFrame()
ret_ports[5] = results.portfolio_value
ret_ports.plot(figsize=[20,10])
print results
end_date=pd.to_datetime("2002-01-01 00:00:00"), symbols=['AA', 'CC']) exchanges_t = pd.DataFrame({'exchange': ['HS_SZ'], 'timezone': ['Asia/Shanghai']}) ''' trading.environment = TradingEnvironment(load=load_t, bm_symbol='^HSI', exchange_tz='Asia/Shanghai') '''trading.environment.write_data(equities=equities_t, exchanges=exchanges_t)''' # Bug in code doesn't set tz if these are not specified # (finance/trading.py:SimulationParameters.calculate_first_open[close]) # .tz_localize("Asia/Shanghai").tz_convert("UTC") a = pd.to_datetime("2001-01-01 00:00:00").tz_localize("Asia/Shanghai") sim_params = create_simulation_parameters( start=pd.to_datetime("2001-01-01 00:00:00").tz_localize("Asia/Shanghai"), end=pd.to_datetime("2001-09-21 00:00:00").tz_localize("Asia/Shanghai"), data_frequency="daily", emission_rate="daily", env=trading.environment) algor_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data, sim_params=sim_params, env=trading.environment) if __name__ =='__main__': import sys import pytz import matplotlib.pyplot as plt from zipline import TradingAlgorithm from zipline.utils.factory import load_from_yahoo import argparse parser = argparse.ArgumentParser(description='predict/test using similarity-prediction') parser.add_argument('-t', '--ticker', action='store', default='AAPL', help='tickers to predict/test') parser.add_argument('-m', '--mamethod', action='store', choices=['ema','ma'], default='ema', help='ma method to pre-process the Close/Volume') parser.add_argument('-p', '--maperiod', action='store', type=int, default=20, help='period to ma Close/Volume') parser.add_argument('-w', '--window', action='store', type=int, default=20, help='window size to match') parser.add_argument('-a', '--lookahead', action='store', type=int, default=1, help='days to lookahead when predict')
def test_history_in_bts_price_minutes(self):
    """
    Test calling history() in before_trading_start() with minutely
    price bars.
    """
    algo_text = """
from zipline.api import history

def initialize(context):
    context.first_bts_call = True

def before_trading_start(context, data):
    if not context.first_bts_call:
        price_bts = history(bar_count=1, frequency='1m', field='price')
        context.price_bts = price_bts
    context.first_bts_call = False

def handle_data(context, data):
    pass
""".strip()

    # March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start, end = (pd.Timestamp('2006-03-20', tz='UTC'),
                  pd.Timestamp('2006-03-22', tz='UTC'))
    sim_params = factory.create_simulation_parameters(start=start, end=end)

    algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )
    results = algo.run(RandomWalkSource(start=start, end=end))
    self.assertIsNotNone(results)

    # history() inside BTS should surface the previous session's
    # closing price for each sid, i.e. the close on 2006-03-21.
    # RandomWalkSource is seeded deterministically, so the expected
    # values are fixed constants (not 'real' prices):
    #   sid 0 -> 180.15661995395106
    #   sid 1 -> 578.41665003444723
    self.assertEqual(180.15661995395106, algo.price_bts[0].ix[0])
    self.assertEqual(578.41665003444723, algo.price_bts[1].ix[0])
def test_history_in_bts_volume_days(self, data_freq):
    """
    Test calling history() in before_trading_start()
    with daily volume bars.

    Parameterized on data_freq -- presumably 'daily' or 'minute',
    supplied by a parameterization decorator not visible here; confirm
    against the test class.
    """
    algo_text = """
from zipline.api import history

def initialize(context):
    context.first_bts_call = True

def before_trading_start(context, data):
    if not context.first_bts_call:
        volume_bts = history(bar_count=2, frequency='1d', field='volume')
        context.volume_bts = volume_bts
    context.first_bts_call = False

def handle_data(context, data):
    volume_hd = history(bar_count=2, frequency='1d', field='volume')
    context.volume_hd = volume_hd
""".strip()

    # March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-22', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end, data_frequency=data_freq)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency=data_freq,
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )
    source = RandomWalkSource(start=start, end=end, freq=data_freq)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    # Get the volume recorded by history() within handle_data()
    volume_hd_0 = test_algo.volume_hd[0]
    volume_hd_1 = test_algo.volume_hd[1]

    # Get the volume recorded by history() within BTS
    volume_bts_0 = test_algo.volume_bts[0]
    volume_bts_1 = test_algo.volume_bts[1]

    # 4pm Eastern close of the second-to-last session.
    penultimate_hd_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    # Midnight of the day on which BTS is invoked.
    newest_bts_dt = normalize_date(pd.Timestamp(
        '2006-03-22 04:00 PM', tz='US/Eastern').tz_convert('UTC'))

    if data_freq == 'daily':
        # If we're dealing with daily data, then we record
        # canonicalized timestamps, so make conversion here:
        penultimate_hd_dt = normalize_date(penultimate_hd_dt)

    # When history() is called in BTS, its 'current' volume value
    # should equal the sum of the previous day.
    self.assertEquals(volume_hd_0[penultimate_hd_dt],
                      volume_bts_0[newest_bts_dt])
    self.assertEquals(volume_hd_1[penultimate_hd_dt],
                      volume_bts_1[newest_bts_dt])
def test_history_in_bts_volume_minutes(self):
    """
    Test calling history() in before_trading_start() with minutely
    volume bars.
    """
    algo_text = """
from zipline.api import history

def initialize(context):
    context.first_bts_call = True

def before_trading_start(context, data):
    if not context.first_bts_call:
        volume_bts = history(bar_count=2, frequency='1m', field='volume')
        context.volume_bts = volume_bts
    context.first_bts_call = False

def handle_data(context, data):
    pass
""".strip()

    # March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start, end = (pd.Timestamp('2006-03-20', tz='UTC'),
                  pd.Timestamp('2006-03-22', tz='UTC'))
    sim_params = factory.create_simulation_parameters(start=start, end=end)

    algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )
    results = algo.run(RandomWalkSource(start=start, end=end))
    self.assertIsNotNone(results)

    # history() in BTS on 2006-03-22 should report the final two
    # minutely volume bars of the previous session, 2006-03-21.
    # RandomWalkSource is seeded deterministically, so the expected
    # values are fixed (not 'real' volumes):
    #                             0       1
    #     2006-03-21 20:59:00  215548  439908
    #     2006-03-21 21:00:00  985645  664313
    sid0_volume = algo.volume_bts[0]
    sid1_volume = algo.volume_bts[1]
    self.assertEqual(215548, sid0_volume.ix[0])
    self.assertEqual(985645, sid0_volume.ix[1])
    self.assertEqual(439908, sid1_volume.ix[0])
    self.assertEqual(664313, sid1_volume.ix[1])
# NOTE(review): fragment -- this code runs inside a per-stock loop of a
# handle_data function that is not visible here; `stock`, `closeprice`,
# `sid`, `order`, `get_datetime`, `analyze` and `input_data` come from
# the enclosing scope / module. Python 2 print syntax.
print get_datetime(), closeprice[sid(stock)][0], closeprice[sid(
    stock)][1], closeprice[sid(stock)][2], closeprice[sid(
    stock)][3], closeprice[sid(stock)][4]
#print closeprice,closeprice[sid(stock)][1]
# Two consecutive rising closes -> buy; two consecutive falls -> sell.
if closeprice[sid(stock)][-2] > closeprice[sid(
        stock)][-3] and closeprice[sid(stock)][-3] > closeprice[
        sid(stock)][-4]:
    print "buy", get_datetime()
    order(stock, 300)
elif closeprice[sid(stock)][-2] < closeprice[sid(
        stock)][-3] and closeprice[sid(stock)][-3] < closeprice[
        sid(stock)][-4]:
    print "sell", get_datetime()
    order(stock, -300)

# capital_base is the base value of capital
# algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data, capital_base=10000, benchmark='399004')
#print input_data
#api: print all the api function
#print algo.all_api_methods()
# NOTE(review): `algo` must be constructed by code outside this
# fragment -- its creation above is commented out.
results = algo.run(input_data)
print results
#print results['benchmark_period_return'],results['portfolio_value']
analyze(results=results)
# Plot the portfolio and asset data. ax1 = plt.subplot(211) #211表示2行1列两个子图,现在正在画第一个portfolio_value图,ax是一个Axes对象 results['portfolio_value'].plot( ax=ax1, color='b', grid=True, linewidth=1.6 ) # results的index是日期,根据输出的csv结果,有列名SYMBOL600111,algo_volatility,algorithm_period_return,alpha,benchmark_period_return,benchmark_volatility,beta,capital_used,ending_cash,ending_exposure,ending_value,excess_return,gross_leverage,information,long_exposure,long_value,longs_count,max_drawdown,max_leverage,net_leverage,orders,pnl,portfolio_value,positions,returns,sharpe,short_exposure,short_value,shorts_count,sortino,starting_cash,starting_exposure,starting_value,trading_days,transactions,treasury_period_return ax1.set_ylabel('Portfolio Performance') #把y轴命名为Portfolio Performance ax2 = plt.subplot( 212, sharex=ax1) #取得另外一个Axes对象,212表示2行1列两个子图,现在正在画第二个get(SYMBOL)图 results.get(SYMBOL).plot(ax=ax2, grid=True, linewidth=1.6) #dataframe.get方法,SYMBOL可以改为 ax2.set_ylabel('%s price ' % SYMBOL) #把第二个子图的名字命名为price,后面那个取SYMBOL的名字 results.to_csv('aaaaaaaaaaaaaa.csv') # Show the plot. plt.gcf().set_size_inches( 18, 8) # plt.gcf():Get a reference to the current figure.设置尺寸 plt.show() bars = get_data_from_tushare( # Using tushare to get data which could be handled by zipline later. stocks=[SYMBOL], start="2015-10-09", end="2016-02-07", ) #bars是一个DataFrame类型,索引就是日期,还有一列是'600111'的收盘价,从tushare调用数据 algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data, identifiers=[SYMBOL]) perf = algo.run(bars) #performance是一个DataFrame类型 plot_function(results=perf)
security.symbol: data[:data.index[0] + datetime.timedelta(days=10)] }) count += 1 print str(count) + " : " + security.symbol except: continue def initialize(context): context.stock = symbol(security.symbol) context.traded = False def handle_data(context, data): if context.traded: return print data order(context.stock, 10000) order(context.stock, -10000, style=LimitOrder(data[context.stock]['close'] * 1.1)) context.traded = True # NOTE: This cell will take a few minutes to run. # Create algorithm object passing in initialize and # handle_data functions algo_obj = TradingAlgorithm(initialize=initialize, handle_data=handle_data) # Run algorithm perf_manual[security.symbol] = algo_obj.run(data)
def main():
    """Run the moving-average strategy over the module-level DATA and
    plot both moving averages against the price series.
    """
    # FIX: replaced a leftover profane debug print ("Aqui porra") with
    # a meaningful progress message.
    print("Running backtest...")
    alg_obj = TradingAlgorithm(initialize=initialize,
                               handle_data=handle_data)
    perf_manual = alg_obj.run(DATA)
    perf_manual[["MA1", "MA2", "Price"]].plot()
def test_history_in_bts_price_days(self, data_freq):
    """
    Test calling history() in before_trading_start()
    with daily price bars.

    Parameterized on data_freq -- presumably 'daily' or 'minute',
    supplied by a parameterization decorator not visible here; confirm
    against the test class.
    """
    algo_text = """
from zipline.api import history

def initialize(context):
    context.first_bts_call = True

def before_trading_start(context, data):
    if not context.first_bts_call:
        prices_bts = history(bar_count=3, frequency='1d', field='price')
        context.prices_bts = prices_bts
    context.first_bts_call = False

def handle_data(context, data):
    prices_hd = history(bar_count=3, frequency='1d', field='price')
    context.prices_hd = prices_hd
""".strip()

    # March 2006
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start = pd.Timestamp('2006-03-20', tz='UTC')
    end = pd.Timestamp('2006-03-22', tz='UTC')

    sim_params = factory.create_simulation_parameters(
        start=start, end=end, data_frequency=data_freq)

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency=data_freq,
        sim_params=sim_params,
        env=TestHistoryAlgo.env,
    )
    source = RandomWalkSource(start=start, end=end, freq=data_freq)
    output = test_algo.run(source)
    self.assertIsNotNone(output)

    # Get the prices recorded by history() within handle_data()
    prices_hd = test_algo.prices_hd[0]
    # Get the prices recorded by history() within BTS
    prices_bts = test_algo.prices_bts[0]

    # before_trading_start() is timestamp'd to midnight prior to
    # the day's trading. Since no equity trades occur at midnight,
    # the price recorded for this time is forward filled from the
    # last trade - typically ~4pm the previous day. This results
    # in the OHLCV data recorded by history() in BTS lagging
    # that recorded by history in handle_data().

    # The trace of the pricing data from history() called within
    # handle_data() vs. BTS in the above algo is as follows:

    # When called within handle_data()
    # ---------------------------------
    # 2006-03-20 21:00:00    139.369469
    # 2006-03-21 21:00:00    180.156620
    # 2006-03-22 21:00:00    221.344654

    # When called within BTS
    # ---------------------------------
    # 2006-03-17 21:00:00           NaN
    # 2006-03-20 21:00:00    139.369469
    # 2006-03-22 00:00:00    180.156620

    # Get relevant Timestamps for the history() call within handle_data()
    oldest_hd_dt = pd.Timestamp(
        '2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    penultimate_hd_dt = pd.Timestamp(
        '2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')

    # Get relevant Timestamps for the history() call within BTS
    penultimate_bts_dt = pd.Timestamp(
        '2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
    newest_bts_dt = normalize_date(pd.Timestamp(
        '2006-03-22 04:00 PM', tz='US/Eastern').tz_convert('UTC'))

    if data_freq == 'daily':
        # If we're dealing with daily data, then we record
        # canonicalized timestamps, so make conversion here:
        oldest_hd_dt = normalize_date(oldest_hd_dt)
        penultimate_hd_dt = normalize_date(penultimate_hd_dt)
        penultimate_bts_dt = normalize_date(penultimate_bts_dt)

    # The BTS series lags handle_data's by exactly one bar.
    self.assertEquals(prices_hd[oldest_hd_dt],
                      prices_bts[penultimate_bts_dt])
    self.assertEquals(prices_hd[penultimate_hd_dt],
                      prices_bts[newest_bts_dt])