def func(case):
    return case[0].__name__


# name_cases is a hypothetical wrapper: the original enclosing function is
# not shown. It builds a copy of each case with the case's function name
# prepended as a label.
def name_cases(cases):
    results = []
    for case in cases:
        new_case = list(case)
        key = func(case)
        new_case.insert(0, key)
        results.append(new_case)
    return results


sim_params_daily = SimulationParameters(
    datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
    datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
    10000,
    emission_rate='daily')
sim_params_minute = SimulationParameters(
    datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
    datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
    10000,
    emission_rate='minute')
returns = factory.create_returns_from_list([1.0], sim_params_daily)
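# Hypothetical usage of the name_cases helper reconstructed above: each case
# is a tuple whose first element is a callable, and the helper prepends that
# callable's name as a label (useful for parameterized test output).
cases = [(max, 1, 2), (min, 3, 4)]
print(name_cases(cases))
# [['max', <built-in function max>, 1, 2],
#  ['min', <built-in function min>, 3, 4]]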
def _run(handle_data,
         initialize,
         before_trading_start,
         analyze,
         algofile,
         algotext,
         defines,
         data_frequency,
         capital_base,
         bundle,
         bundle_timestamp,
         start,
         end,
         output,
         trading_calendar,
         print_algo,
         metrics_set,
         local_namespace,
         environ,
         blotter,
         benchmark_returns,
         broker,
         state_filename,
         realtime_bar_target,
         performance_callback,
         stop_execution_callback,
         teardown,
         execution_id):
    """
    Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.

    zipline-live additions:
    broker - wrapper used to connect to a real broker
    state_filename - saves the algorithm's context so that it can be
        restarted
    performance_callback - a callback that sends performance results every
        day, not only at the end of the backtest. This allows running live
        while monitoring the algorithm's performance.
    stop_execution_callback - a callback that checks whether execution
        should be stopped. It is used to stop live trading (a simulation can
        be stopped with it as well). If the callback returns True, algorithm
        execution is aborted.
    teardown - an algorithm method, like handle_data() or
        before_trading_start(), that is called when algorithm execution stops
    execution_id - a unique id identifying this execution (backtest or live
        instance)
    """
    if benchmark_returns is None:
        benchmark_returns, _ = load_market_data(environ=environ)

    emission_rate = 'daily'
    if broker:
        emission_rate = 'minute'
        # If zipline is run as a command line tool, these will probably not
        # have been initialized.
        if not start:
            start = pd.Timestamp.utcnow()
        if not end:
            # In cli mode, sessions are one day only, and the tool is re-run
            # each day by the user.
            end = start + pd.Timedelta('1 day')

    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign,
                )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e),
                )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if trading_calendar is None:
        trading_calendar = get_calendar('NYSE')

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ),
        )

    bundle_data = bundles.load(
        bundle,
        environ,
        bundle_timestamp,
    )

    first_trading_day = \
        bundle_data.equity_minute_bar_reader.first_trading_day

    DataPortalClass = (partial(DataPortalLive, broker)
                       if broker
                       else DataPortal)
    data = DataPortalClass(
        bundle_data.asset_finder,
        trading_calendar=trading_calendar,
        first_trading_day=first_trading_day,
        equity_minute_reader=bundle_data.equity_minute_bar_reader,
        equity_daily_reader=bundle_data.equity_daily_bar_reader,
        adjustment_reader=bundle_data.adjustment_reader,
    )

    pipeline_loader = USEquityPricingLoader(
        bundle_data.equity_daily_bar_reader,
        bundle_data.adjustment_reader,
    )

    def choose_loader(column):
        if column in USEquityPricing.columns:
            return pipeline_loader
        raise ValueError(
            "No PipelineLoader registered for column %s." % column
        )

    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    TradingAlgorithmClass = (partial(LiveTradingAlgorithm,
                                     broker=broker,
                                     state_filename=state_filename,
                                     realtime_bar_target=realtime_bar_target)
                             if broker else TradingAlgorithm)

    perf = TradingAlgorithmClass(
        namespace=namespace,
        data_portal=data,
        get_pipeline_loader=choose_loader,
        trading_calendar=trading_calendar,
        sim_params=SimulationParameters(
            start_session=start,
            end_session=end,
            trading_calendar=trading_calendar,
            capital_base=capital_base,
            emission_rate=emission_rate,
            data_frequency=data_frequency,
            execution_id=execution_id,
        ),
        metrics_set=metrics_set,
        blotter=blotter,
        benchmark_returns=benchmark_returns,
        performance_callback=performance_callback,
        stop_execution_callback=stop_execution_callback,
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
            'teardown': teardown,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }
    ).run()

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
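# For context, a minimal sketch of the algorithm-side hooks this runner wires
# up, including the zipline-live `teardown` addition described in the
# docstring above. The asset and order sizing are arbitrary assumptions, not
# part of the runner itself.
from zipline.api import order_target_percent, record, symbol


def initialize(context):
    # Called once before the first bar (backtest) or at startup (live).
    context.asset = symbol('AAPL')  # hypothetical example asset


def handle_data(context, data):
    # Called every bar; live runs use the 'minute' emission rate set above.
    order_target_percent(context.asset, 0.5)
    record(price=data.current(context.asset, 'price'))


def teardown(context):
    # zipline-live addition: called when execution stops, so a live
    # algorithm can clean up (e.g. persist state or cancel open orders).
    pass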
def setUp(self):
    self.dt = datetime.datetime(2003, 11, 30, tzinfo=pytz.utc)
    self.end_dt = datetime.datetime(2004, 11, 25, tzinfo=pytz.utc)
    self.sim_params = SimulationParameters(self.dt, self.end_dt)
    self.benchmark_events = benchmark_events_in_range(self.sim_params)
plt.legend(loc=0)
plt.show()


# Load data manually
start = datetime(2000, 1, 1, 0, 0, 0, 0, pytz.utc).date()
end = datetime(2017, 7, 1, 0, 0, 0, 0, pytz.utc).date()
data = load(['SPY'], start=start, end=end)

"""
Prepare simulation
"""
sim_params = SimulationParameters(
    start_session=pd.Timestamp('2004-01-01', tz=pytz.utc),
    end_session=pd.Timestamp('2017-07-01', tz=pytz.utc),
    capital_base=1.0e5,
    data_frequency='daily',
    trading_calendar=get_calendar("NYSE"),
)

period = [3]
bottom = [30]
top = [70]
shift = [15]
max_long = [5]
prob_window = [10]
prob = [20]


def worker(args):
    """thread worker function"""
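# The snippet above is cut off just as worker() begins, but the parameter
# lists read like a grid search. A plausible sketch of fanning that grid out
# to worker() with a process pool follows; the argument tuple layout handed
# to worker() is an assumption, not the original code.
import itertools
import multiprocessing

param_grid = list(itertools.product(
    period, bottom, top, shift, max_long, prob_window, prob))

if __name__ == '__main__':
    pool = multiprocessing.Pool()
    grid_results = pool.map(worker, param_grid)
    pool.close()
    pool.join()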
def test_handle_adjustment(self, set_screen):
    AAPL, MSFT, BRK_A = assets = self.assets
    window_lengths = [1, 2, 5, 10]
    vwaps = self.compute_expected_vwaps(window_lengths)

    def vwap_key(length):
        return "vwap_%d" % length

    def initialize(context):
        pipeline = Pipeline()
        context.vwaps = []
        for length in vwaps:
            name = vwap_key(length)
            factor = VWAP(window_length=length)
            context.vwaps.append(factor)
            pipeline.add(factor, name=name)

        filter_ = (USEquityPricing.close.latest > 300)
        pipeline.add(filter_, 'filter')
        if set_screen:
            pipeline.set_screen(filter_)

        attach_pipeline(pipeline, 'test')

    def handle_data(context, data):
        today = normalize_date(get_datetime())
        results = pipeline_output('test')
        expect_over_300 = {
            AAPL: today < self.AAPL_split_date,
            MSFT: False,
            BRK_A: True,
        }
        for asset in assets:
            should_pass_filter = expect_over_300[asset]
            if set_screen and not should_pass_filter:
                self.assertNotIn(asset, results.index)
                continue

            asset_results = results.loc[asset]
            self.assertEqual(asset_results['filter'], should_pass_filter)
            for length in vwaps:
                computed = results.loc[asset, vwap_key(length)]
                expected = vwaps[length][asset].loc[today]
                # Only having two places of precision here is a bit
                # unfortunate.
                assert_almost_equal(computed, expected, decimal=2)

    # Do the same checks in before_trading_start
    before_trading_start = handle_data

    self.run_algorithm(
        initialize=initialize,
        handle_data=handle_data,
        before_trading_start=before_trading_start,
        sim_params=SimulationParameters(
            start_session=self.dates[max(window_lengths)],
            end_session=self.dates[-1],
            data_frequency='daily',
            emission_rate='daily',
            trading_calendar=self.trading_calendar,
        )
    )
def test_multi_source_as_input(self):
    sim_params = SimulationParameters(
        self.df.index[0],
        self.df.index[-1],
    )
    algo = TestRegisterTransformAlgorithm(
        sim_params=sim_params,
        sids=[0, 1, 133],
    )
    algo.run([self.source, self.df_source], overwrite_sim_params=False)
    self.assertEqual(len(algo.sources), 2)
def test_tracker(self, parameter_comment, days_to_delete):
    """
    @days_to_delete - configures which days in the data set we should
    remove, used for ensuring that we still return performance messages
    even when there is no data.
    """
    # This date range covers Columbus Day; however, Columbus Day is not a
    # market holiday.
    #
    #      October 2008
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start_dt = datetime.datetime(year=2008, month=10, day=9,
                                 tzinfo=pytz.utc)
    end_dt = datetime.datetime(year=2008, month=10, day=16,
                               tzinfo=pytz.utc)

    trade_count = 6
    sid = 133
    price = 10.1
    price_list = [price] * trade_count
    volume = [100] * trade_count
    trade_time_increment = datetime.timedelta(days=1)

    sim_params = SimulationParameters(
        period_start=start_dt,
        period_end=end_dt
    )

    trade_history = factory.create_trade_history(
        sid,
        price_list,
        volume,
        trade_time_increment,
        sim_params,
        source_id="factory1"
    )

    sid2 = 134
    price2 = 12.12
    price2_list = [price2] * trade_count
    trade_history2 = factory.create_trade_history(
        sid2,
        price2_list,
        volume,
        trade_time_increment,
        sim_params,
        source_id="factory2"
    )

    # 'middle' start of 3 depends on number of days == 7
    middle = 3

    # First delete from middle
    if days_to_delete.middle:
        del trade_history[middle:(middle + days_to_delete.middle)]
        del trade_history2[middle:(middle + days_to_delete.middle)]

    # Delete start
    if days_to_delete.start:
        del trade_history[:days_to_delete.start]
        del trade_history2[:days_to_delete.start]

    # Delete from end
    if days_to_delete.end:
        del trade_history[-days_to_delete.end:]
        del trade_history2[-days_to_delete.end:]

    sim_params.first_open = sim_params.calculate_first_open()
    sim_params.last_close = sim_params.calculate_last_close()
    sim_params.capital_base = 1000.0
    sim_params.frame_index = ['sid', 'volume', 'dt', 'price', 'changed']
    perf_tracker = perf.PerformanceTracker(sim_params)

    events = date_sorted_sources(trade_history, trade_history2)
    events = [self.event_with_txn(event, trade_history[0].dt)
              for event in events]

    # Extract events with transactions to use for verification.
    events_with_txns = [event for event in events if event.TRANSACTION]

    perf_messages = \
        [msg for date, snapshot in
         perf_tracker.transform(
             itertools.groupby(events, attrgetter('dt')))
         for event in snapshot
         for msg in event.perf_messages]

    end_perf_messages, risk_message = perf_tracker.handle_simulation_end()
    perf_messages.extend(end_perf_messages)

    # We skip two trades to test the case of a None transaction.
    self.assertEqual(perf_tracker.txn_count, len(events_with_txns))

    cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
    expected_size = len(events_with_txns) / 2 * -25
    self.assertEqual(cumulative_pos.amount, expected_size)

    self.assertEqual(perf_tracker.last_close,
                     perf_tracker.cumulative_risk_metrics.end_date)
    self.assertEqual(len(perf_messages), sim_params.days_in_period)
def test_minute_buy_and_hold(self):
    with trading.TradingEnvironment():
        start_date = datetime.datetime(
            year=2006, month=1, day=3, hour=0, minute=0, tzinfo=pytz.utc)
        end_date = datetime.datetime(
            year=2006, month=1, day=5, hour=0, minute=0, tzinfo=pytz.utc)

        sim_params = SimulationParameters(
            period_start=start_date,
            period_end=end_date,
            emission_rate='daily',
            data_frequency='minute')

        algo = BuyAndHoldAlgorithm(
            sim_params=sim_params,
            data_frequency='minute')

        first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
        first_open, first_close = \
            trading.environment.get_open_and_close(first_date)

        second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
        second_open, second_close = \
            trading.environment.get_open_and_close(second_date)

        third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)
        third_open, third_close = \
            trading.environment.get_open_and_close(third_date)

        benchmark_data = [
            Event({
                'returns': 0.1,
                'dt': first_close,
                'source_id': 'test-benchmark-source',
                'type': DATASOURCE_TYPE.BENCHMARK
            }),
            Event({
                'returns': 0.2,
                'dt': second_close,
                'source_id': 'test-benchmark-source',
                'type': DATASOURCE_TYPE.BENCHMARK
            }),
            Event({
                'returns': 0.4,
                'dt': third_close,
                'source_id': 'test-benchmark-source',
                'type': DATASOURCE_TYPE.BENCHMARK
            }),
        ]

        trade_bar_data = [
            Event({
                'open_price': 10,
                'close_price': 15,
                'price': 15,
                'volume': 1000,
                'sid': 1,
                'dt': first_open,
                'source_id': 'test-trade-source',
                'type': DATASOURCE_TYPE.TRADE
            }),
            Event({
                'open_price': 10,
                'close_price': 15,
                'price': 15,
                'volume': 1000,
                'sid': 1,
                'dt': first_open + datetime.timedelta(minutes=10),
                'source_id': 'test-trade-source',
                'type': DATASOURCE_TYPE.TRADE
            }),
            Event({
                'open_price': 15,
                'close_price': 20,
                'price': 20,
                'volume': 2000,
                'sid': 1,
                'dt': second_open,
                'source_id': 'test-trade-source',
                'type': DATASOURCE_TYPE.TRADE
            }),
            Event({
                'open_price': 15,
                'close_price': 20,
                'price': 20,
                'volume': 2000,
                'sid': 1,
                'dt': second_open + datetime.timedelta(minutes=10),
                'source_id': 'test-trade-source',
                'type': DATASOURCE_TYPE.TRADE
            }),
            Event({
                'open_price': 20,
                'close_price': 15,
                'price': 15,
                'volume': 1000,
                'sid': 1,
                'dt': third_open,
                'source_id': 'test-trade-source',
                'type': DATASOURCE_TYPE.TRADE
            }),
            Event({
                'open_price': 20,
                'close_price': 15,
                'price': 15,
                'volume': 1000,
                'sid': 1,
                'dt': third_open + datetime.timedelta(minutes=10),
                'source_id': 'test-trade-source',
                'type': DATASOURCE_TYPE.TRADE
            }),
        ]

        algo.benchmark_return_source = benchmark_data
        algo.sources = list([trade_bar_data])
        gen = algo._create_generator(sim_params)

        crm = algo.perf_tracker.cumulative_risk_metrics

        first_msg = gen.next()

        self.assertIsNotNone(first_msg,
                             "There should be a message emitted.")

        # Protects against bug where the positions appeared to be
        # a day late, because benchmarks were triggering
        # calculations before the events for the day were
        # processed.
        self.assertEqual(1, len(algo.portfolio.positions),
                         "There should be one position after the first "
                         "day.")

        self.assertTrue(
            np.isnan(crm.algorithm_volatility[-1]),
            "On the first day algorithm volatility does not exist.")

        second_msg = gen.next()

        self.assertIsNotNone(second_msg,
                             "There should be a message emitted.")

        self.assertEqual(1, len(algo.portfolio.positions),
                         "Number of positions should stay the same.")

        # TODO: Hand derive. Current value is just a canary to
        # detect changes.
        np.testing.assert_almost_equal(
            0.050022510129558301,
            crm.algorithm_returns[-1],
            decimal=6)

        third_msg = gen.next()

        self.assertEqual(1, len(algo.portfolio.positions),
                         "Number of positions should stay the same.")

        self.assertIsNotNone(third_msg,
                             "There should be a message emitted.")

        # TODO: Hand derive. Current value is just a canary to
        # detect changes.
        np.testing.assert_almost_equal(
            -0.047639464532418657,
            crm.algorithm_returns[-1],
            decimal=6)
def test_daily_buy_and_hold(self):
    start_date = datetime.datetime(
        year=2006, month=1, day=3, hour=0, minute=0, tzinfo=pytz.utc)
    end_date = datetime.datetime(
        year=2006, month=1, day=5, hour=0, minute=0, tzinfo=pytz.utc)

    sim_params = SimulationParameters(
        period_start=start_date,
        period_end=end_date,
        emission_rate='daily'
    )

    algo = BuyAndHoldAlgorithm(
        sim_params=sim_params,
        data_frequency='daily')

    first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
    second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
    third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)

    trade_bar_data = [
        Event({
            'open_price': 10,
            'close_price': 15,
            'price': 15,
            'volume': 1000,
            'sid': 1,
            'dt': first_date,
            'source_id': 'test-trade-source',
            'type': DATASOURCE_TYPE.TRADE
        }),
        Event({
            'open_price': 15,
            'close_price': 20,
            'price': 20,
            'volume': 2000,
            'sid': 1,
            'dt': second_date,
            'source_id': 'test_list',
            'type': DATASOURCE_TYPE.TRADE
        }),
        Event({
            'open_price': 20,
            'close_price': 15,
            'price': 15,
            'volume': 1000,
            'sid': 1,
            'dt': third_date,
            'source_id': 'test_list',
            'type': DATASOURCE_TYPE.TRADE
        }),
    ]
    benchmark_data = [
        Event({
            'returns': 0.1,
            'dt': first_date,
            'source_id': 'test-benchmark-source',
            'type': DATASOURCE_TYPE.BENCHMARK
        }),
        Event({
            'returns': 0.2,
            'dt': second_date,
            'source_id': 'test-benchmark-source',
            'type': DATASOURCE_TYPE.BENCHMARK
        }),
        Event({
            'returns': 0.4,
            'dt': third_date,
            'source_id': 'test-benchmark-source',
            'type': DATASOURCE_TYPE.BENCHMARK
        }),
    ]

    algo.benchmark_return_source = benchmark_data
    algo.sources = list([trade_bar_data])
    gen = algo._create_generator(sim_params)

    # TODO: Hand derive these results. Currently they are just the output
    # from the time of this writing, kept to at least be an early warning
    # against changes.
    expected_algorithm_returns = {
        first_date: 0.0,
        second_date: -0.000350,
        third_date: -0.050018
    }
    expected_sharpe = {
        first_date: np.nan,
        second_date: -1.630920,
        third_date: -1.016842,
    }

    for bar in gen:
        current_dt = algo.get_datetime()
        crm = algo.perf_tracker.cumulative_risk_metrics

        np.testing.assert_almost_equal(
            expected_algorithm_returns[current_dt],
            crm.algorithm_returns[-1],
            decimal=6)

        np.testing.assert_almost_equal(
            expected_sharpe[current_dt],
            crm.sharpe[-1],
            decimal=6)
def run(opt='twse', debug=False, limit=0):
    maxlen = 30
    starttime = datetime.utcnow() - timedelta(days=300)
    endtime = datetime.utcnow()
    report = Report(
        'bbands',
        sort=[('buys', False), ('sells', False), ('portfolio_value', False)],
        limit=20)
    kwargs = {'debug': debug, 'limit': limit, 'opt': opt}
    idhandler = TwseIdDBHandler(**kwargs) if kwargs['opt'] == 'twse' \
        else OtcIdDBHandler(**kwargs)
    for stockid in idhandler.stock.get_ids():
        try:
            kwargs = {
                'opt': opt,
                'targets': ['stock', 'future', 'credit'],
                'starttime': starttime,
                'endtime': endtime,
                'stockids': [stockid],
                'traderids': [],
                'base': 'stock',
                'callback': None,
                'limit': 1,
                'debug': debug
            }
            panel, dbhandler = collect_hisframe(**kwargs)
            if len(panel[stockid].index) < maxlen:
                continue
            sim_params = SimulationParameters(
                period_start=panel[stockid].index[0],
                period_end=panel[stockid].index[-1],
                data_frequency='daily',
                emission_rate='daily')
            bbands = BBands(dbhandler=dbhandler, debug=debug,
                            sim_params=sim_params)
            results = bbands.run(panel).fillna(0)
            risks = bbands.perf_tracker.handle_simulation_end()
            report.collect(stockid, results, risks)
            print "%s pass" % (stockid)
        except:
            print traceback.format_exc()
            continue

    if report.report.empty:
        return

    # report summary
    stream = report.summary(dtype='html')
    report.write(stream, 'bbands.html')
    for stockid in report.iter_symbol():
        stream = report.iter_report(stockid, dtype='html')
        report.write(stream, "bbands_%s.html" % (stockid))

    for stockid in report.iter_symbol():
        perf = report.pool[stockid]
        dates = [date2num(i) for i in perf.index[maxlen:]]
        quotes = [perf[label][maxlen:].values
                  for label in ['open', 'high', 'low', 'close']]
        quotes = zip(*([dates] + quotes))

        fig = plt.figure(facecolor='#07000d')
        ax1 = plt.subplot2grid((6, 4), (1, 0), rowspan=4, colspan=4,
                               axisbg='#07000d')
        candlestick_ohlc(ax1, quotes, width=.6, colorup='#53c156',
                         colordown='#ff1717')
        ax1.plot(dates, perf['upper'][maxlen:].values, '#e1edf9',
                 label='upper', linewidth=1.5)
        ax1.plot(dates, perf['middle'][maxlen:].values, '#e1edf9',
                 label='middle', linewidth=1.5)
        ax1.plot(dates, perf['lower'][maxlen:].values, '#e1edf9',
                 label='lower', linewidth=1.5)
        ax1.grid(True, color='w')
        ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
        ax1.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
        ax1.yaxis.label.set_color("w")
        ax1.spines['bottom'].set_color("#5998ff")
        ax1.spines['top'].set_color("#5998ff")
        ax1.spines['left'].set_color("#5998ff")
        ax1.spines['right'].set_color("#5998ff")
        ax1.tick_params(axis='y', colors='w')
        plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
        ax1.tick_params(axis='x', colors='w')
        plt.ylabel('Stock price and Volume')

        bbLeg = plt.legend(loc=9, ncol=2, prop={'size': 7},
                           fancybox=True, borderaxespad=0.)
        bbLeg.get_frame().set_alpha(0.4)
        textEd = pylab.gca().get_legend().get_texts()
        pylab.setp(textEd[0:6], color='w')

        ax1v = ax1.twinx()
        ax1v.fill_between(dates, 0, perf['volume'][maxlen:].values,
                          facecolor='#00ffe8', alpha=.4)
        ax1v.axes.yaxis.set_ticklabels([])
        ax1v.grid(False)
        # Edit this to 3, so it's a bit larger
        ax1v.set_ylim(0, 3 * perf['volume'][maxlen:].values.max())
        ax1v.spines['bottom'].set_color("#5998ff")
        ax1v.spines['top'].set_color("#5998ff")
        ax1v.spines['left'].set_color("#5998ff")
        ax1v.spines['right'].set_color("#5998ff")
        ax1v.tick_params(axis='x', colors='w')
        ax1v.tick_params(axis='y', colors='w')

        plt.setp(ax1.get_xticklabels(), visible=False)
        plt.subplots_adjust(left=.09, bottom=.14, right=.94, top=.95,
                            wspace=.20, hspace=0)
        plt.gcf().set_size_inches(18, 8)
        plt.savefig("bbands_%s.png" % (stockid),
                    facecolor=fig.get_facecolor())
def _run(
    handle_data,
    initialize,
    before_trading_start,
    analyze,
    algofile,
    algotext,
    defines,
    data_frequency,
    capital_base,
    bundle,
    bundle_timestamp,
    start,
    end,
    output,
    trading_calendar,
    print_algo,
    metrics_set,
    local_namespace,
    environ,
    blotter,
    custom_loader,
    benchmark_spec,
):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.
    """
    bundle_data = bundles.load(
        bundle,
        environ,
        bundle_timestamp,
    )

    if trading_calendar is None:
        trading_calendar = get_calendar("XNYS")

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            "There are no trading days between %s and %s"
            % (
                start.date(),
                end.date(),
            ),
        )

    benchmark_sid, benchmark_returns = benchmark_spec.resolve(
        asset_finder=bundle_data.asset_finder,
        start_date=start,
        end_date=end,
    )

    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split("=", 2)
            except ValueError:
                raise ValueError(
                    "invalid define %r, should be of the form name=value"
                    % assign,
                )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    "failed to execute definition for name %r: %s"
                    % (name, e),
                )
    elif defines:
        raise _RunAlgoError(
            "cannot pass define without `algotext`",
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    first_trading_day = bundle_data.equity_minute_bar_reader.first_trading_day

    data = DataPortal(
        bundle_data.asset_finder,
        trading_calendar=trading_calendar,
        first_trading_day=first_trading_day,
        equity_minute_reader=bundle_data.equity_minute_bar_reader,
        equity_daily_reader=bundle_data.equity_daily_bar_reader,
        adjustment_reader=bundle_data.adjustment_reader,
        future_minute_reader=bundle_data.equity_minute_bar_reader,
        future_daily_reader=bundle_data.equity_daily_bar_reader,
    )

    pipeline_loader = USEquityPricingLoader.without_fx(
        bundle_data.equity_daily_bar_reader,
        bundle_data.adjustment_reader,
    )

    def choose_loader(column):
        if column in USEquityPricing.columns:
            return pipeline_loader
        try:
            return custom_loader.get(column)
        except KeyError:
            raise ValueError(
                "No PipelineLoader registered for column %s." % column
            )

    if isinstance(metrics_set, str):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, str):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    try:
        perf = TradingAlgorithm(
            namespace=namespace,
            data_portal=data,
            get_pipeline_loader=choose_loader,
            trading_calendar=trading_calendar,
            sim_params=SimulationParameters(
                start_session=start,
                end_session=end,
                trading_calendar=trading_calendar,
                capital_base=capital_base,
                data_frequency=data_frequency,
            ),
            metrics_set=metrics_set,
            blotter=blotter,
            benchmark_returns=benchmark_returns,
            benchmark_sid=benchmark_sid,
            **{
                "initialize": initialize,
                "handle_data": handle_data,
                "before_trading_start": before_trading_start,
                "analyze": analyze,
            }
            if algotext is None
            else {
                "algo_filename": getattr(algofile, "name", "<algorithm>"),
                "script": algotext,
            },
        ).run()
    except NoBenchmark:
        raise _RunAlgoError(
            (
                "No ``benchmark_spec`` was provided, and"
                " ``zipline.api.set_benchmark`` was not called in"
                " ``initialize``."
            ),
            (
                "Neither '--benchmark-symbol' nor '--benchmark-sid' was"
                " provided, and ``zipline.api.set_benchmark`` was not called"
                " in ``initialize``. Did you mean to pass '--no-benchmark'?"
            ),
        )

    if output == "-":
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
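# As a point of reference, the public entry point that shares this code path
# can be driven directly. A minimal sketch, assuming a bundle (here the
# default 'quantopian-quandl') has already been ingested and using an
# arbitrary example asset; depending on the zipline version, a benchmark may
# also need to be configured or disabled.
import pandas as pd
from zipline import run_algorithm
from zipline.api import order_target_percent, symbol


def initialize(context):
    context.asset = symbol('AAPL')  # hypothetical example asset


def handle_data(context, data):
    order_target_percent(context.asset, 1.0)


perf = run_algorithm(
    start=pd.Timestamp('2016-01-04', tz='utc'),
    end=pd.Timestamp('2016-12-30', tz='utc'),
    initialize=initialize,
    handle_data=handle_data,
    capital_base=100000,
    data_frequency='daily',
    bundle='quantopian-quandl',
)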
def run(opt='twse', debug=False, limit=0): """ as doctest run """ maxlen = 30 starttime = datetime.utcnow() - timedelta(days=300) endtime = datetime.utcnow() report = Report('dualema', sort=[('buys', False), ('sells', False), ('portfolio_value', False)], limit=20) kwargs = {'debug': debug, 'limit': limit, 'opt': opt} # fetch idhandler = TwseIdDBHandler( **kwargs) if kwargs['opt'] == 'twse' else OtcIdDBHandler(**kwargs) for stockid in idhandler.stock.get_ids(): try: kwargs = { 'opt': opt, 'targets': ['stock'], 'starttime': starttime, 'endtime': endtime, 'stockids': [stockid], 'traderids': [], 'base': 'stock', 'callback': None, 'limit': 1, 'debug': debug } panel, dbhandler = collect_hisframe(**kwargs) if len(panel[stockid].index) < maxlen: continue sim_params = SimulationParameters( period_start=panel[stockid].index[0], period_end=panel[stockid].index[-1], data_frequency='daily', emission_rate='daily') dualema = DualEMA(dbhandler=dbhandler, debug=debug, sim_params=sim_params) results = dualema.run(panel).fillna(0) risks = dualema.perf_tracker.handle_simulation_end() report.collect(stockid, results, risks) print "%s pass" % (stockid) except: print traceback.format_exc() continue if report.report.empty: return # report summary stream = report.summary(dtype='html') report.write(stream, 'dualema.html') for stockid in report.iter_symbol(): stream = report.iter_report(stockid, dtype='html') report.write(stream, "dualema_%s.html" % (stockid)) # plot for stockid in report.iter_symbol(): try: perf = report.pool[stockid] fig = plt.figure() ax1 = fig.add_subplot(211, ylabel='portfolio value') perf.portfolio_value.plot(ax=ax1) ax2 = fig.add_subplot(212) perf[['short_ema', 'long_ema']].plot(ax=ax2) ax2.plot(perf.ix[perf.buy].index, perf.short_ema[perf.buy], '^', markersize=10, color='m') ax2.plot(perf.ix[perf.sell].index, perf.short_ema[perf.sell], 'v', markersize=10, color='k') plt.legend(loc=0) plt.gcf().set_size_inches(18, 8) plt.savefig("dualema_%s.png" % (stockid)) #plt.show() except: continue
def run(self, algo, tBeg=None, tEnd=None, commission=None, slippage=None,
        warn=False, bar_source='yahoo', adjusted=True,
        data_frequency='daily', include_open=True, csi_port='ETFs'):

    # An algo must be provided
    if not algo:
        raise Exception('No algo provided')
    self.algo = algo

    # set commission model
    if commission:
        self.algo.set_commission(commission)
    else:
        self.algo.set_commission(
            PerShareWithMin(comm_per_share=0.01, comm_min=1.0))

    # set slippage model
    if slippage:
        self.algo.set_slippage(slippage)
    else:
        self.algo.set_slippage(FixedSlippage(spread=0.0))

    # guess starting and ending dates if none are provided
    if not tEnd:
        tEnd = get_end_date()
    if not tBeg:
        tBeg = get_start_date(self.algo.iL, tNow=tEnd)

    tBeg = pytz.utc.localize(tBeg)
    tEnd = pytz.utc.localize(tEnd)

    self.sim_params = SimulationParameters(
        tBeg, tEnd,
        data_frequency=data_frequency,
        capital_base=self.capital_base,
        make_new_environment=True,
        extra_dates=[])
    # print self.sim_params

    source = self.get_bar_source(tBeg, tEnd, bar_source, adjusted=adjusted,
                                 include_open=include_open,
                                 csi_port=csi_port)

    if bar_source == 'redshift':
        bench_source, self.bench_price_utc = redshift.get_bench_source(
            tBeg, tEnd)
    else:
        bench_source = None

    sources = [source]

    if not warn:
        # turn off warnings
        import warnings
        warnings.filterwarnings('ignore')

    self.run_time = time.time()
    self.results = self.algo.run(sources,
                                 sim_params=self.sim_params,
                                 benchmark_return_source=bench_source)
    self.run_time = time.time() - self.run_time
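# PerShareWithMin appears to be a project-local commission model. A sketch of
# roughly equivalent defaults using zipline's built-in models ($0.01 per
# share with a $1.00 minimum per trade, and zero spread); the parameter
# values mirror the defaults installed above.
from zipline.api import set_commission, set_slippage
from zipline.finance.commission import PerShare
from zipline.finance.slippage import FixedSlippage


def initialize(context):
    set_commission(PerShare(cost=0.01, min_trade_cost=1.0))
    set_slippage(FixedSlippage(spread=0.0))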
def test_tracker(self, parameter_comment, days_to_delete):
    """
    @days_to_delete - configures which days in the data set we should
    remove, used for ensuring that we still return performance messages
    even when there is no data.
    """
    # This date range covers Columbus day,
    # however Columbus day is not a market holiday
    #
    #      October 2008
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start_dt = datetime.datetime(year=2008, month=10, day=9,
                                 tzinfo=pytz.utc)
    end_dt = datetime.datetime(year=2008, month=10, day=16,
                               tzinfo=pytz.utc)

    trade_count = 6
    sid = 133
    price = 10.1
    price_list = [price] * trade_count
    volume = [100] * trade_count
    trade_time_increment = datetime.timedelta(days=1)

    sim_params = SimulationParameters(
        period_start=start_dt,
        period_end=end_dt
    )

    benchmark_events = benchmark_events_in_range(sim_params)

    trade_history = factory.create_trade_history(
        sid,
        price_list,
        volume,
        trade_time_increment,
        sim_params,
        source_id="factory1"
    )

    sid2 = 134
    price2 = 12.12
    price2_list = [price2] * trade_count
    trade_history2 = factory.create_trade_history(
        sid2,
        price2_list,
        volume,
        trade_time_increment,
        sim_params,
        source_id="factory2"
    )

    # 'middle' start of 3 depends on number of days == 7
    middle = 3

    # First delete from middle
    if days_to_delete.middle:
        del trade_history[middle:(middle + days_to_delete.middle)]
        del trade_history2[middle:(middle + days_to_delete.middle)]

    # Delete start
    if days_to_delete.start:
        del trade_history[:days_to_delete.start]
        del trade_history2[:days_to_delete.start]

    # Delete from end
    if days_to_delete.end:
        del trade_history[-days_to_delete.end:]
        del trade_history2[-days_to_delete.end:]

    sim_params.first_open = sim_params.calculate_first_open()
    sim_params.last_close = sim_params.calculate_last_close()
    sim_params.capital_base = 1000.0
    sim_params.frame_index = ['sid', 'volume', 'dt', 'price', 'changed']
    perf_tracker = perf.PerformanceTracker(sim_params)

    events = date_sorted_sources(trade_history, trade_history2)

    events = [event for event in
              self.trades_with_txns(events, trade_history[0].dt)]

    # Extract events with transactions to use for verification.
    txns = [event for event in events
            if event.type == DATASOURCE_TYPE.TRANSACTION]

    orders = [event for event in events
              if event.type == DATASOURCE_TYPE.ORDER]

    all_events = date_sorted_sources(events, benchmark_events)

    filtered_events = [filt_event for filt_event in all_events
                       if filt_event.dt <= end_dt]
    filtered_events.sort(key=lambda x: x.dt)
    grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
    perf_messages = []

    for date, group in grouped_events:
        for event in group:
            perf_tracker.process_event(event)
        msg = perf_tracker.handle_market_close()
        perf_messages.append(msg)

    self.assertEqual(perf_tracker.txn_count, len(txns))
    self.assertEqual(perf_tracker.txn_count, len(orders))

    cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
    expected_size = len(txns) / 2 * -25
    self.assertEqual(cumulative_pos.amount, expected_size)

    self.assertEqual(len(perf_messages), sim_params.days_in_period)
def _run(handle_data,
         initialize,
         before_trading_start,
         analyze,
         algofile,
         algotext,
         defines,
         data_frequency,
         capital_base,
         bundle,
         bundle_timestamp,
         start,
         end,
         output,
         trading_calendar,
         print_algo,
         metrics_set,
         local_namespace,
         environ,
         blotter,
         benchmark_returns):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.
    """
    if benchmark_returns is None:
        benchmark_returns = load_benchmark_data(environ=environ)

    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign,
                )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e),
                )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if trading_calendar is None:
        trading_calendar = get_calendar('XNYS')

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ),
        )

    bundle_data = bundles.load(
        bundle,
        environ,
        bundle_timestamp,
    )

    first_trading_day = \
        bundle_data.equity_minute_bar_reader.first_trading_day

    data = DataPortal(
        bundle_data.asset_finder,
        trading_calendar=trading_calendar,
        first_trading_day=first_trading_day,
        equity_minute_reader=bundle_data.equity_minute_bar_reader,
        equity_daily_reader=bundle_data.equity_daily_bar_reader,
        adjustment_reader=bundle_data.adjustment_reader,
    )

    pipeline_loader = USEquityPricingLoader.without_fx(
        bundle_data.equity_daily_bar_reader,
        bundle_data.adjustment_reader,
    )

    def choose_loader(column):
        if column in USEquityPricing.columns:
            return pipeline_loader
        raise ValueError(
            "No PipelineLoader registered for column %s." % column
        )

    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    perf = TradingAlgorithm(
        namespace=namespace,
        data_portal=data,
        get_pipeline_loader=choose_loader,
        trading_calendar=trading_calendar,
        sim_params=SimulationParameters(
            start_session=start,
            end_session=end,
            trading_calendar=trading_calendar,
            capital_base=capital_base,
            data_frequency=data_frequency,
            # fix for minute perf
            emission_rate=data_frequency,
        ),
        metrics_set=metrics_set,
        blotter=blotter,
        benchmark_returns=benchmark_returns,
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }
    ).run()

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
def test_minute_tracker(self):
    """ Tests minute performance tracking."""
    with trading.TradingEnvironment():
        start_dt = trading.environment.exchange_dt_in_utc(
            datetime.datetime(2013, 3, 1, 9, 31))
        end_dt = trading.environment.exchange_dt_in_utc(
            datetime.datetime(2013, 3, 1, 16, 0))

        sim_params = SimulationParameters(
            period_start=start_dt,
            period_end=end_dt,
            emission_rate='minute'
        )
        tracker = perf.PerformanceTracker(sim_params)

        foo_event_1 = factory.create_trade('foo', 10.0, 20, start_dt)
        order_event_1 = Order(sid=foo_event_1.sid,
                              amount=-25,
                              dt=foo_event_1.dt)
        bar_event_1 = factory.create_trade('bar', 100.0, 200, start_dt)
        txn_event_1 = Transaction(sid=foo_event_1.sid,
                                  amount=-25,
                                  dt=foo_event_1.dt,
                                  price=10.0,
                                  commission=0.50,
                                  order_id=order_event_1.id)
        benchmark_event_1 = Event({
            'dt': start_dt,
            'returns': 0.01,
            'type': DATASOURCE_TYPE.BENCHMARK
        })

        foo_event_2 = factory.create_trade(
            'foo', 11.0, 20, start_dt + datetime.timedelta(minutes=1))
        bar_event_2 = factory.create_trade(
            'bar', 11.0, 20, start_dt + datetime.timedelta(minutes=1))
        benchmark_event_2 = Event({
            'dt': start_dt + datetime.timedelta(minutes=1),
            'returns': 0.02,
            'type': DATASOURCE_TYPE.BENCHMARK
        })

        events = [
            foo_event_1,
            order_event_1,
            benchmark_event_1,
            txn_event_1,
            bar_event_1,
            foo_event_2,
            benchmark_event_2,
            bar_event_2,
        ]

        grouped_events = itertools.groupby(
            events, operator.attrgetter('dt'))

        messages = {}
        for date, group in grouped_events:
            tracker.set_date(date)
            for event in group:
                tracker.process_event(event)
            tracker.handle_minute_close(date)
            msg = tracker.to_dict()
            messages[date] = msg

        self.assertEquals(2, len(messages))

        msg_1 = messages[foo_event_1.dt]
        msg_2 = messages[foo_event_2.dt]

        self.assertEquals(1, len(msg_1['minute_perf']['transactions']),
                          "The first message should contain one "
                          "transaction.")
        # Check that transactions aren't emitted for previous events.
        self.assertEquals(0, len(msg_2['minute_perf']['transactions']),
                          "The second message should have no "
                          "transactions.")

        self.assertEquals(1, len(msg_1['minute_perf']['orders']),
                          "The first message should contain one order.")
        # Check that orders aren't emitted for previous events.
        self.assertEquals(0, len(msg_2['minute_perf']['orders']),
                          "The second message should have no orders.")

        # Ensure that period_close moves through time.
        # Also, ensure that the period_closes are the expected dts.
        self.assertEquals(foo_event_1.dt,
                          msg_1['minute_perf']['period_close'])
        self.assertEquals(foo_event_2.dt,
                          msg_2['minute_perf']['period_close'])

        # Ensure that a Sharpe value for cumulative metrics is being
        # created.
        self.assertIsNotNone(msg_1['cumulative_risk_metrics']['sharpe'])
        self.assertIsNotNone(msg_2['cumulative_risk_metrics']['sharpe'])
def test_old_new_data_api_paths(self):
    """
    Test that the new and old data APIs hit the same code paths.

    We want to ensure that the old data API (data[sid(N)].field and
    similar) and the new data API (data.current(sid(N), field) and
    similar) hit the same code paths on the DataPortal.
    """
    test_start_minute = self.trading_calendar.minutes_for_session(
        self.sim_params.sessions[0])[1]
    test_end_minute = self.trading_calendar.minutes_for_session(
        self.sim_params.sessions[0])[-1]
    bar_data = self.create_bardata(lambda: test_end_minute)

    ohlcvp_fields = [
        "open",
        "high",
        "low",
        "close",
        "volume",
        "price",
    ]

    spot_value_meth = 'zipline.data.data_portal.DataPortal.get_spot_value'

    def assert_get_spot_value_called(fun, field):
        """
        Assert that get_spot_value was called during the execution of fun.

        Takes in a function fun and a string field.
        """
        with patch(spot_value_meth) as gsv:
            fun()
            gsv.assert_called_with(self.asset1, field,
                                   test_end_minute, 'minute')

    # Ensure that data.current(sid(n), field) has the same behaviour as
    # data[sid(n)].field.
    for field in ohlcvp_fields:
        assert_get_spot_value_called(
            lambda: getattr(bar_data[self.asset1], field),
            field,
        )
        assert_get_spot_value_called(
            lambda: bar_data.current(self.asset1, field),
            field,
        )

    history_meth = 'zipline.data.data_portal.DataPortal.get_history_window'

    def assert_get_history_window_called(fun, is_legacy):
        """
        Assert that get_history_window was called during fun().

        Takes in a function fun and a boolean is_legacy.
        """
        with patch(history_meth) as ghw:
            fun()
            # Slightly hacky, but done to get around the fact that
            # history() explicitly passes an ffill param as the last arg,
            # while data.history doesn't.
            if is_legacy:
                ghw.assert_called_with(
                    [self.asset1, self.asset2, self.asset3],
                    test_end_minute, 5, "1m", "volume", "minute", True)
            else:
                ghw.assert_called_with(
                    [self.asset1, self.asset2, self.asset3],
                    test_end_minute, 5, "1m", "volume", "minute",
                )

    test_sim_params = SimulationParameters(
        start_session=test_start_minute,
        end_session=test_end_minute,
        data_frequency="minute",
        trading_calendar=self.trading_calendar,
    )

    history_algorithm = self.create_algo(
        history_algo,
        sim_params=test_sim_params,
    )
    assert_get_history_window_called(
        lambda: history_algorithm.run(),
        is_legacy=True,
    )
    assert_get_history_window_called(
        lambda: bar_data.history(
            [self.asset1, self.asset2, self.asset3],
            "volume", 5, "1m"),
        is_legacy=False,
    )
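# A minimal sketch of the two API styles the test above exercises, side by
# side inside handle_data. The asset is an arbitrary assumption, and the bare
# history() call uses the legacy zipline.api.history (removed in later
# zipline versions).
from zipline.api import history, symbol


def handle_data(context, data):
    asset = symbol('AAPL')  # hypothetical example asset

    # Spot values: both forms should reach DataPortal.get_spot_value.
    legacy_price = data[asset].price
    current_price = data.current(asset, 'price')

    # Rolling windows: both forms should reach
    # DataPortal.get_history_window.
    legacy_window = history(5, '1m', 'volume')
    current_window = data.history(asset, 'volume', 5, '1m')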
def test_history_passed_to_talib(self):
    """
    Had an issue where MagicMock was causing errors during validation
    with talib.

    We don't officially support a talib integration yet, but using talib
    directly should work.
    """
    algo_text = """
import talib
import numpy as np

from zipline.api import history, add_history, record

def initialize(context):
    add_history(2, '1d', 'price')

def handle_data(context, data):
    prices = history(2, '1d', 'price')

    ma_result = talib.MA(np.asarray(prices[0]), timeperiod=2)
    record(ma=ma_result[-1])
""".strip()

    #      April 2007
    # Su Mo Tu We Th Fr Sa
    #  1  2  3  4  5  6  7
    #  8  9 10 11 12 13 14
    # 15 16 17 18 19 20 21
    # 22 23 24 25 26 27 28
    # 29 30

    # Eddie: this was set to 04-10 but I don't see how that makes
    # sense as it does not generate enough data to get at -2 index
    # below.
    start = pd.Timestamp('2007-04-05', tz='UTC')
    end = pd.Timestamp('2007-04-10', tz='UTC')

    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=float("1.0e5"),
        data_frequency='minute',
        emission_rate='daily'
    )

    test_algo = TradingAlgorithm(
        script=algo_text,
        data_frequency='minute',
        sim_params=sim_params
    )

    source = RandomWalkSource(start=start, end=end)
    output = test_algo.run(source)
    # At this point, just ensure that there is no crash.
    self.assertIsNotNone(output)

    recorded_ma = output.ix[-2, 'ma']

    self.assertFalse(pd.isnull(recorded_ma))
    # Depends on seed
    np.testing.assert_almost_equal(recorded_ma, 159.76304468946876)
def test_simple_transforms(self):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("ignore", PerformanceWarning)
        warnings.simplefilter("default", ZiplineDeprecationWarning)

        sim_params = SimulationParameters(
            start_session=self.sim_params.sessions[8],
            end_session=self.sim_params.sessions[-1],
            data_frequency="minute",
            trading_calendar=self.trading_calendar,
        )

        algo = self.create_algo(simple_transforms_algo,
                                sim_params=sim_params)
        algo.run()

        self.assertEqual(8, len(w))
        transforms = ["mavg", "vwap", "stddev", "returns"]

        for idx, line_no in enumerate(range(8, 12)):
            warning1 = w[idx * 2]
            warning2 = w[(idx * 2) + 1]

            self.assertEqual("<string>", warning1.filename)
            self.assertEqual("<string>", warning2.filename)

            self.assertEqual(line_no, warning1.lineno)
            self.assertEqual(line_no, warning2.lineno)

            self.assertEqual(
                "`data[sid(N)]` is deprecated. Use `data.current`.",
                str(warning1.message))
            self.assertEqual(
                "The `{0}` method is deprecated.".format(transforms[idx]),
                str(warning2.message))

        # now verify the transform values
        #
        # minute price
        # 2016-01-11 14:31:00+00:00    1561
        # ...
        # 2016-01-14 20:59:00+00:00    3119
        # 2016-01-14 21:00:00+00:00    3120
        # 2016-01-15 14:31:00+00:00    3121
        # 2016-01-15 14:32:00+00:00    3122
        # 2016-01-15 14:33:00+00:00    3123
        #
        # volume
        # 2016-01-11 14:31:00+00:00    156100
        # ...
        # 2016-01-14 20:59:00+00:00    311900
        # 2016-01-14 21:00:00+00:00    312000
        # 2016-01-15 14:31:00+00:00    312100
        # 2016-01-15 14:32:00+00:00    312200
        # 2016-01-15 14:33:00+00:00    312300
        #
        # daily price (last day built with minute data)
        # 2016-01-14 00:00:00+00:00       9
        # 2016-01-15 00:00:00+00:00    3123
        #
        # mavg = average of all the prices = (1561 + 3123) / 2 = 2342
        # vwap = sum(price * volume) / sum(volumes)
        #      = 889119531400.0 / 366054600.0
        #      = 2428.9259891830343
        # stddev = stddev(price, ddof=1) = 451.3435498597493
        # returns = (todayprice - yesterdayprice) / yesterdayprice
        #         = (3123 - 9) / 9 = 346
        self.assertEqual(2342, algo.mavg)
        self.assertAlmostEqual(2428.92599, algo.vwap, places=5)
        self.assertAlmostEqual(451.34355, algo.stddev, places=5)
        self.assertAlmostEqual(346, algo.returns)
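# A quick numpy check of the arithmetic derived in the comments above,
# assuming (per those comments) minute prices running 1561 through 3123 with
# volume equal to 100 times price.
import numpy as np

prices = np.arange(1561, 3124, dtype=float)
volumes = prices * 100

print(prices.mean())                             # 2342.0  (mavg)
print((prices * volumes).sum() / volumes.sum())  # 2428.9259891830343  (vwap)
print(prices.std(ddof=1))                        # 451.3435498597493  (stddev)
print((3123 - 9) / 9.0)                          # 346.0  (returns)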
def test_tracker(self, parameter_comment, days_to_delete):
    """
    @days_to_delete - configures which days in the data set we should
    remove, used for ensuring that we still return performance messages
    even when there is no data.
    """
    # This date range covers Columbus day,
    # however Columbus day is not a market holiday
    #
    #      October 2008
    # Su Mo Tu We Th Fr Sa
    #           1  2  3  4
    #  5  6  7  8  9 10 11
    # 12 13 14 15 16 17 18
    # 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
    start_dt = datetime(year=2008, month=10, day=9, tzinfo=pytz.utc)
    end_dt = datetime(year=2008, month=10, day=16, tzinfo=pytz.utc)

    trade_count = 6
    sid = 133
    price = 10.1
    price_list = [price] * trade_count
    volume = [100] * trade_count
    trade_time_increment = timedelta(days=1)

    sim_params = SimulationParameters(
        period_start=start_dt,
        period_end=end_dt
    )

    benchmark_events = benchmark_events_in_range(sim_params)

    trade_history = factory.create_trade_history(
        sid,
        price_list,
        volume,
        trade_time_increment,
        sim_params,
        source_id="factory1"
    )

    sid2 = 134
    price2 = 12.12
    price2_list = [price2] * trade_count
    trade_history2 = factory.create_trade_history(
        sid2,
        price2_list,
        volume,
        trade_time_increment,
        sim_params,
        source_id="factory2"
    )

    # 'middle' start of 3 depends on number of days == 7
    middle = 3

    # First delete from middle
    if days_to_delete.middle:
        del trade_history[middle:(middle + days_to_delete.middle)]
        del trade_history2[middle:(middle + days_to_delete.middle)]

    # Delete start
    if days_to_delete.start:
        del trade_history[:days_to_delete.start]
        del trade_history2[:days_to_delete.start]

    # Delete from end
    if days_to_delete.end:
        del trade_history[-days_to_delete.end:]
        del trade_history2[-days_to_delete.end:]

    sim_params.first_open = sim_params.calculate_first_open()
    sim_params.last_close = sim_params.calculate_last_close()
    sim_params.capital_base = 1000.0
    sim_params.frame_index = ['sid', 'volume', 'dt', 'price', 'changed']
    perf_tracker = perf.PerformanceTracker(sim_params)

    events = date_sorted_sources(trade_history, trade_history2)

    events = [event for event in
              self.trades_with_txns(events, trade_history[0].dt)]

    # Extract events with transactions to use for verification.
    txns = [event for event in events
            if event.type == zp.DATASOURCE_TYPE.TRANSACTION]

    orders = [event for event in events
              if event.type == zp.DATASOURCE_TYPE.ORDER]

    all_events = date_sorted_sources(events, benchmark_events)

    filtered_events = [filt_event for filt_event in all_events
                       if filt_event.dt <= end_dt]
    filtered_events.sort(key=lambda x: x.dt)
    grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
    perf_messages = []

    for date, group in grouped_events:
        for event in group:
            perf_tracker.process_event(event)
        msg = perf_tracker.handle_market_close_daily()
        perf_messages.append(msg)

    self.assertEqual(perf_tracker.txn_count, len(txns))
    self.assertEqual(perf_tracker.txn_count, len(orders))

    cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
    expected_size = len(txns) / 2 * -25
    self.assertEqual(cumulative_pos.amount, expected_size)

    self.assertEqual(len(perf_messages), sim_params.days_in_period)
def run(opt='twse', debug=False, limit=0): """ as doctest run """ maxlen = 5 starttime = datetime.utcnow() - timedelta(days=15) endtime = datetime.utcnow() report = Report('besttrader', sort=[('buys', False), ('sells', False), ('portfolio_value', False)], limit=20) kwargs = {'debug': debug, 'limit': limit, 'opt': opt} # 1590:u'花旗環球', 1440:u'美林' traderid = '1440' idhandler = TwseIdDBHandler( **kwargs) if kwargs['opt'] == 'twse' else OtcIdDBHandler(**kwargs) for stockid in idhandler.stock.get_ids(): try: # pre find traderid as top0 kwargs = { 'opt': opt, 'targets': ['trader'], 'starttime': starttime, 'endtime': endtime, 'stockids': [stockid], 'traderids': [], 'base': 'stock', 'constraint': lambda x: x.value["ebuyratio"] > 10 or x.value["totalkeepbuy"] >= 1, 'order': lambda x: [-x.value["totalvolume"], -x.value["totalbuyratio"]], 'callback': None, 'limit': 10, 'debug': debug } panel, dbhandler = collect_hisframe(**kwargs) tops = list( dbhandler.trader.get_alias([stockid], 'trader', ["top%d" % i for i in range(10)])) if not tops: continue print "%s prefound:%s" % (stockid, tops) traderid = tops[0] if traderid not in tops else traderid # run kwargs = { 'opt': opt, 'targets': ['stock', 'trader', 'future', 'credit'], 'starttime': starttime, 'endtime': endtime, 'stockids': [stockid], 'traderids': [traderid], 'base': 'trader', 'callback': None, 'limit': 10, 'debug': debug } panel, dbhandler = collect_hisframe(**kwargs) if len(panel[stockid].index) < maxlen: continue sim_params = SimulationParameters( period_start=panel[stockid].index[0], period_end=panel[stockid].index[-1], data_frequency='daily', emission_rate='daily') besttrader = BestTrader(dbhandler=dbhandler, debug=debug, sim_params=sim_params) results = besttrader.run(panel).fillna(0) risks = besttrader.perf_tracker.handle_simulation_end() report.collect(stockid, results, risks) print "%s pass" % (stockid) except: print traceback.format_exc() continue if report.report.empty: return # report summary stream = report.summary(dtype='html') report.write(stream, 'besttrader.html') for stockid in report.iter_symbol(): stream = report.iter_report(stockid, dtype='html') report.write(stream, "besttrader_%s.html" % (stockid)) # plot for stockid in report.iter_symbol(): try: perf = report.pool[stockid] fig = plt.figure() ax1 = fig.add_subplot(211, ylabel='portfolio value') perf.portfolio_value.plot(ax=ax1) ax2 = fig.add_subplot(212) perf[['close']].plot(ax=ax2) ax2.plot(perf.ix[perf.buy].index, perf.close[perf.buy], '^', markersize=10, color='m') ax2.plot(perf.ix[perf.sell].index, perf.close[perf.sell], 'v', markersize=10, color='k') plt.legend(loc=0) plt.gcf().set_size_inches(18, 8) plt.savefig("besttrader_%s_%s.png" % (traderid, stockid)) #plt.show() except: continue