def surcharge_market_data(self, bm_symbol='^GSPC'):
    """Build fake "live" market data aligned on today's date.

    Loads zipline's stored benchmark returns and treasury curves for
    *bm_symbol* and re-indexes them onto a date range starting now, so
    stored data can stand in for live data.

    Parameters
    ----------
    bm_symbol : str
        Benchmark symbol to resolve against ``datautils.Exchanges``
        (default: S&P 500).

    Returns
    -------
    (pd.Series, OrderedDict)
        Fake benchmark series (filled with the exchange code) and the
        treasury curves re-keyed on synthetic timestamps.

    Raises
    ------
    ValueError
        If *bm_symbol* does not match any known exchange.
    """
    #TODO Parametric
    #event_dt = datetime.today().replace(tzinfo=pytz.utc)
    event_dt = self.normalize_date(datetime.now())

    # Resolve the exchange code for the requested benchmark symbol.
    # Previously an unknown symbol left `code` unbound (NameError at the
    # pd.Series call below); fail fast with an explicit error instead.
    code = None
    for exchange, infos in datautils.Exchanges.iteritems():
        if infos['symbol'] == bm_symbol:
            # `infos` is Exchanges[exchange]; no need to re-index the dict.
            code = infos['code']
            break
    if code is None:
        raise ValueError('unknown benchmark symbol: %r' % bm_symbol)

    bm_returns, tr_curves = zipline.load_market_data(bm_symbol)
    dates = pd.date_range(event_dt, periods=len(bm_returns))

    #NOTE What is tr_curves['tid'] ?
    #TODO Replace values to detect the fake later
    tr_fake = OrderedDict(sorted(
        ((pd.Timestamp(event_dt + i * self.offset), c)
         for i, c in enumerate(tr_curves.values())),
        key=lambda t: t[0]))
    bm_fake = pd.Series([code] * len(dates), index=dates)
    # NOTE: a dead loop that rebuilt the same timestamps and discarded
    # them was removed here — it had no effect.
    return bm_fake, tr_fake
def _load_live_market_data(self, bm_symbol='^GSPC'):
    """Build fake "live" market data aligned on today's date.

    Loads zipline's stored benchmark returns and treasury curves for
    *bm_symbol* and re-indexes them onto a date range starting now.

    Parameters
    ----------
    bm_symbol : str
        Benchmark symbol passed to ``zipline.load_market_data``
        (default: S&P 500).

    Returns
    -------
    (pd.Series, OrderedDict)
        Fake benchmark series (constant placeholder value 1001, since
        the exchange-code concept is deprecated) and the treasury
        curves re-keyed on synthetic timestamps.
    """
    #TODO Parametric
    #event_dt = datetime.today().replace(tzinfo=pytz.utc)
    event_dt = self.normalize_date(datetime.now())
    bm_returns, tr_curves = zipline.load_market_data(bm_symbol)
    dates = pd.date_range(event_dt, periods=len(bm_returns))

    #NOTE What is tr_curves['tid'] ?
    #TODO Replace values to detect the fake later
    tr_fake = OrderedDict(
        sorted(((pd.Timestamp(event_dt + i * self.offset), c)
                for i, c in enumerate(tr_curves.values())),
               key=lambda t: t[0]))

    # NOTE the code concept is deprecated
    bm_fake = pd.Series([1001] * len(dates), index=dates)
    # NOTE: a dead loop that recomputed the same `pd.Timestamp` values
    # and discarded them was removed here — it had no effect.
    return bm_fake, tr_fake
def _load_live_market_data(self, bm_symbol='^GSPC'):
    """Build fake "live" market data aligned on today's date.

    Duplicate definition of the same helper: loads zipline's stored
    benchmark returns and treasury curves for *bm_symbol* and re-indexes
    them onto a date range starting now.

    Parameters
    ----------
    bm_symbol : str
        Benchmark symbol passed to ``zipline.load_market_data``
        (default: S&P 500).

    Returns
    -------
    (pd.Series, OrderedDict)
        Fake benchmark series (constant placeholder value 1001) and the
        treasury curves re-keyed on synthetic timestamps.
    """
    #TODO Parametric
    #event_dt = datetime.today().replace(tzinfo=pytz.utc)
    event_dt = self.normalize_date(datetime.now())
    bm_returns, tr_curves = zipline.load_market_data(bm_symbol)
    dates = pd.date_range(event_dt, periods=len(bm_returns))

    #NOTE What is tr_curves['tid'] ?
    #TODO Replace values to detect the fake later
    tr_fake = OrderedDict(sorted(
        ((pd.Timestamp(event_dt + i * self.offset), c)
         for i, c in enumerate(tr_curves.values())),
        key=lambda t: t[0]))

    # NOTE the code concept is deprecated
    bm_fake = pd.Series([1001] * len(dates), index=dates)
    # NOTE: a dead loop that recomputed the same `pd.Timestamp` values
    # and discarded them was removed here — it had no effect.
    return bm_fake, tr_fake
def _run(handle_data,
         initialize,
         before_trading_start,
         analyze,
         algofile,
         algotext,
         defines,
         data_frequency,
         capital_base,
         bundle,
         bundle_timestamp,
         custom_data_portal,
         start,
         end,
         output,
         trading_calendar,
         print_algo,
         metrics_set,
         local_namespace,
         environ,
         blotter,
         benchmark_returns):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.

    Notable parameters (the rest mirror the zipline CLI options):
    - ``algofile`` / ``algotext``: the algorithm, as a file object or as
      source text.
    - ``defines``: ``name=value`` pairs evaluated into the algorithm's
      namespace (text mode only).
    - ``custom_data_portal``: optional pre-built DataPortal; when given,
      the bundle-backed portal construction is skipped.
    - ``output``: path for the pickled performance frame, ``'-'`` for
      stdout, or ``os.devnull`` to discard it.
    """
    if benchmark_returns is None:
        # Fall back to the default benchmark returns from the loader.
        benchmark_returns, _ = load_market_data(environ=environ)

    if algotext is not None:
        if local_namespace:
            # Run inside the caller's IPython namespace so the algorithm
            # can see the user's interactive variables.
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                # NOTE(review): maxsplit=2 means a value containing '='
                # yields three parts and is rejected as invalid — confirm
                # that is the intended behavior (maxsplit=1 would allow it).
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign,
                )
            try:
                # evaluate in the same namespace so names may refer to
                # eachother
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' % (name, e),
                )
    elif defines:
        # -D/--define only makes sense together with algorithm text.
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            # Syntax-highlight the algorithm source when pygments is
            # installed; otherwise echo it verbatim.
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if trading_calendar is None:
        trading_calendar = get_calendar('XNYS')

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ),
        )

    bundle_data = bundles.load(
        bundle,
        environ,
        bundle_timestamp,
    )

    # TODO: Fix this for the custom DataPortal case.
    # (first_trading_day is read from the bundle even when a custom
    # portal is supplied.)
    first_trading_day = \
        bundle_data.equity_minute_bar_reader.first_trading_day

    if custom_data_portal is None:
        data = DataPortal(
            bundle_data.asset_finder,
            trading_calendar=trading_calendar,
            first_trading_day=first_trading_day,
            equity_minute_reader=bundle_data.equity_minute_bar_reader,
            equity_daily_reader=bundle_data.equity_daily_bar_reader,
            adjustment_reader=bundle_data.adjustment_reader,
        )
    else:
        data = custom_data_portal

    # TODO: Fix this for the custom DataPortal case.
    # (pipeline pricing still reads from the bundle even when a custom
    # portal supplies simulation data.)
    pipeline_loader = USEquityPricingLoader(
        bundle_data.equity_daily_bar_reader,
        bundle_data.adjustment_reader,
    )

    def choose_loader(column):
        # Only US equity pricing columns are wired to a loader here.
        if column in USEquityPricing.columns:
            return pipeline_loader
        raise ValueError(
            "No PipelineLoader registered for column %s." % column
        )

    # String names for the metrics set / blotter are resolved through the
    # respective registries.
    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    perf = TradingAlgorithm(
        namespace=namespace,
        data_portal=data,
        get_pipeline_loader=choose_loader,
        trading_calendar=trading_calendar,
        sim_params=SimulationParameters(
            start_session=start,
            end_session=end,
            trading_calendar=trading_calendar,
            capital_base=capital_base,
            data_frequency=data_frequency,
        ),
        metrics_set=metrics_set,
        blotter=blotter,
        benchmark_returns=benchmark_returns,
        # Callable-mode algorithms get lifecycle functions directly;
        # text-mode algorithms are compiled from source.
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }
    ).run()

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
def _run(handle_data,
         initialize,
         before_trading_start,
         analyze,
         algofile,
         algotext,
         defines,
         data_frequency,
         capital_base,
         bundle,
         bundle_timestamp,
         start,
         end,
         output,
         trading_calendar,
         print_algo,
         metrics_set,
         local_namespace,
         environ,
         blotter,
         benchmark_returns):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.

    This variant is wired for China A-share data: it defaults to the
    XSHG (Shanghai) calendar and loads pipeline pricing through
    ``CNEquityPricingLoader``, falling back to ``global_loader`` for
    other ``BoundColumn`` instances.
    """
    if benchmark_returns is None:
        # Fall back to the default benchmark returns from the loader.
        benchmark_returns, _ = load_market_data(environ=environ)

    if algotext is not None:
        if local_namespace:
            # Run inside the caller's IPython namespace so the algorithm
            # can see the user's interactive variables.
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                # NOTE(review): maxsplit=2 rejects values containing '='
                # (three parts won't unpack) — confirm that is intended.
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign,
                )
            try:
                # evaluate in the same namespace so names may refer to
                # eachother
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' % (name, e),
                )
    elif defines:
        # -D/--define only makes sense together with algorithm text.
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            # Syntax-highlight the algorithm source when pygments is
            # installed; otherwise echo it verbatim.
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if trading_calendar is None:
        trading_calendar = get_calendar('XSHG')

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ),
        )

    bundle_data = bundles.load(
        bundle,
        environ,
        bundle_timestamp,
    )

    first_trading_day = \
        bundle_data.equity_minute_bar_reader.first_trading_day

    data = DataPortal(
        bundle_data.asset_finder,
        trading_calendar=trading_calendar,
        first_trading_day=first_trading_day,
        equity_minute_reader=bundle_data.equity_minute_bar_reader,
        equity_daily_reader=bundle_data.equity_daily_bar_reader,
        adjustment_reader=bundle_data.adjustment_reader,
    )

    pipeline_loader = CNEquityPricingLoader(
        bundle_data.equity_daily_bar_reader,
        bundle_data.adjustment_reader,
    )

    def choose_loader(column):
        if column in CNEquityPricing.columns:
            return pipeline_loader
        # # quick-and-dirty handling
        elif type(column) == BoundColumn:
            # # checking against the instance avoids a KeyError
            return global_loader
        raise ValueError(
            "No PipelineLoader registered for column %s." % column
        )

    # String names for the metrics set / blotter are resolved through the
    # respective registries.
    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    perf = TradingAlgorithm(
        namespace=namespace,
        data_portal=data,
        get_pipeline_loader=choose_loader,
        trading_calendar=trading_calendar,
        sim_params=SimulationParameters(
            start_session=start,
            end_session=end,
            trading_calendar=trading_calendar,
            capital_base=capital_base,
            data_frequency=data_frequency,
        ),
        metrics_set=metrics_set,
        blotter=blotter,
        benchmark_returns=benchmark_returns,
        # Callable-mode algorithms get lifecycle functions directly;
        # text-mode algorithms are compiled from source.
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }
    ).run()

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
def _run(handle_data,
         initialize,
         before_trading_start,
         analyze,
         algofile,
         algotext,
         defines,
         data_frequency,
         capital_base,
         bundle,
         bundle_timestamp,
         start,
         end,
         output,
         trading_calendar,
         print_algo,
         metrics_set,
         local_namespace,
         environ,
         blotter,
         benchmark_returns,
         broker,
         state_filename,
         realtime_bar_target,
         performance_callback,
         stop_execution_callback,
         teardown,
         execution_id):
    """
    Run a backtest for the given algorithm.
    This is shared between the cli and :func:`zipline.run_algo`.

    zipline-live additions:
    broker - wrapper to connect to a real broker
    state_filename - saving the context of the algo to be able to restart
    performance_callback - a callback to send performance results everyday
        and not only at the end of the backtest. this allows to run live,
        and monitor the performance of the algorithm
    stop_execution_callback - A callback to check if execution should be
        stopped. it is used to be able to stop live trading (also
        simulation could be stopped using this) execution. if the callback
        returns True, then algo execution will be aborted.
    teardown - algo method like handle_data() or before_trading_start()
        that is called when the algo execution stops
    execution_id - unique id to identify this execution (backtest or live
        instance)
    """
    if benchmark_returns is None:
        # Fall back to the default benchmark returns from the loader.
        benchmark_returns, _ = load_market_data(environ=environ)

    # Live (broker-connected) runs emit results per minute instead of
    # per day.
    emission_rate = 'daily'
    if broker:
        emission_rate = 'minute'
        # if we run zipline as a command line tool, these will probably
        # not be initiated
        if not start:
            start = pd.Timestamp.utcnow()
        if not end:
            # in cli mode, sessions are 1 day only. and it will be
            # re-ran each day by user
            end = start + pd.Timedelta('1 day')

    if algotext is not None:
        if local_namespace:
            # Run inside the caller's IPython namespace so the algorithm
            # can see the user's interactive variables.
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                # NOTE(review): maxsplit=2 rejects values containing '='
                # (three parts won't unpack) — confirm that is intended.
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign,
                )
            try:
                # evaluate in the same namespace so names may refer to
                # eachother
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' % (name, e),
                )
    elif defines:
        # -D/--define only makes sense together with algorithm text.
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            # Syntax-highlight the algorithm source when pygments is
            # installed; otherwise echo it verbatim.
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if trading_calendar is None:
        trading_calendar = get_calendar('NYSE')

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ),
        )

    bundle_data = bundles.load(
        bundle,
        environ,
        bundle_timestamp,
    )

    first_trading_day = \
        bundle_data.equity_minute_bar_reader.first_trading_day

    # Live runs wrap the portal so realtime broker bars can be merged in.
    DataPortalClass = (partial(DataPortalLive, broker)
                       if broker
                       else DataPortal)

    data = DataPortalClass(
        bundle_data.asset_finder,
        trading_calendar=trading_calendar,
        first_trading_day=first_trading_day,
        equity_minute_reader=bundle_data.equity_minute_bar_reader,
        equity_daily_reader=bundle_data.equity_daily_bar_reader,
        adjustment_reader=bundle_data.adjustment_reader,
    )

    pipeline_loader = USEquityPricingLoader(
        bundle_data.equity_daily_bar_reader,
        bundle_data.adjustment_reader,
    )

    def choose_loader(column):
        # Only US equity pricing columns are wired to a loader here.
        if column in USEquityPricing.columns:
            return pipeline_loader
        raise ValueError("No PipelineLoader registered for column %s."
                         % column)

    # String names for the metrics set / blotter are resolved through the
    # respective registries.
    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    # Live runs use the LiveTradingAlgorithm with broker/state plumbing
    # pre-bound; backtests use the plain TradingAlgorithm.
    TradingAlgorithmClass = (partial(LiveTradingAlgorithm,
                                     broker=broker,
                                     state_filename=state_filename,
                                     realtime_bar_target=realtime_bar_target)
                             if broker else TradingAlgorithm)

    perf = TradingAlgorithmClass(
        namespace=namespace,
        data_portal=data,
        get_pipeline_loader=choose_loader,
        trading_calendar=trading_calendar,
        sim_params=SimulationParameters(start_session=start,
                                        end_session=end,
                                        trading_calendar=trading_calendar,
                                        capital_base=capital_base,
                                        emission_rate=emission_rate,
                                        data_frequency=data_frequency,
                                        execution_id=execution_id),
        metrics_set=metrics_set,
        blotter=blotter,
        benchmark_returns=benchmark_returns,
        performance_callback=performance_callback,
        stop_execution_callback=stop_execution_callback,
        # Callable-mode algorithms get lifecycle functions directly;
        # text-mode algorithms are compiled from source.
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
            'teardown': teardown,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }).run()

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
def surcharge_market_data(self, bm_symbol='^GSPC'):
    """Extend stored market data with fake "live" data.

    Concatenates zipline's stored benchmark returns with the synthetic
    live continuation produced by ``_load_live_market_data``, and merges
    the two sets of treasury curves.

    Parameters
    ----------
    bm_symbol : str
        Benchmark symbol passed through to both loaders.

    Returns
    -------
    (pd.Series, OrderedDict-like)
        Combined benchmark series and the backtest treasury curves
        updated in place with the live ones.
    """
    bm_bt, tr_bt = zipline.load_market_data(bm_symbol)
    bm_live, tr_live = self._load_live_market_data(bm_symbol)
    # Series.append() is deprecated and removed in pandas 2.0;
    # pd.concat performs the same row-wise concatenation.
    bm = pd.concat([bm_bt, bm_live])
    tr_bt.update(tr_live)
    return bm, tr_bt