def initialize(self, *args, **kwargs):
    """
    Run the user-supplied initialize function.

    The algorithm instance is exposed to the Zipline API functions for
    the duration of the call, so `order`, `record`, etc. resolve against
    this algorithm.
    """
    api_context = ZiplineAPI(self)
    with api_context:
        self._initialize(self)
def initialize(self, *args, **kwargs):
    """
    Initialize the algorithm with context persistence.

    When a saved state file exists, restore the user context from it and
    skip re-running initialization. Otherwise run the parent class's
    initialize under the Zipline API context and snapshot the resulting
    context to disk.
    """
    # Everything already present on the instance at this point (plus the
    # trading client) is runtime machinery rather than user state, so it
    # is excluded from persistence.
    excludes = list(self.__dict__) + ['trading_client']
    self._context_persistence_excludes = excludes

    if os.path.isfile(self.state_filename):
        # Warm start: rehydrate the user context from the previous run.
        log.info("Loading state from {}".format(self.state_filename))
        load_context(self.state_filename,
                     context=self,
                     checksum=self.algo_filename)
        return

    # Cold start: run user initialization, then persist the fresh context.
    # NOTE(review): super(self.__class__, self) breaks if this class is
    # subclassed further (infinite recursion) — confirm intended.
    with ZiplineAPI(self):
        super(self.__class__, self).initialize(*args, **kwargs)
        store_context(self.state_filename,
                      context=self,
                      checksum=self.algo_filename,
                      exclude_list=excludes)
def test_zipline_api_resolves_dynamically(self):
    """
    Verify that zipline.api module-level functions dispatch to whatever
    algorithm is currently installed by ZiplineAPI, by replacing each
    api method on the algo and checking the module-level call returns
    the replacement's result.
    """
    # Make a dummy algo.
    algo = TradingAlgorithm(
        initialize=lambda context: None,
        handle_data=lambda context, data: None,
        sim_params=self.sim_params,
    )

    # Verify that api methods get resolved dynamically by patching them out
    # and then calling them
    for method in algo.all_api_methods():
        name = method.__name__
        sentinel = object()

        # Bind this iteration's sentinel as a default argument. A plain
        # closure over `sentinel` is a late-binding closure (B023): if any
        # fake method were invoked after the loop advanced, it would
        # return a later iteration's sentinel instead of its own.
        def fake_method(*args, _sentinel=sentinel, **kwargs):
            return _sentinel

        setattr(algo, name, fake_method)
        with ZiplineAPI(algo):
            self.assertIs(sentinel, getattr(zipline.api, name)())
def initialize(self, *args, **kwargs):
    """
    Initialize the algorithm with whitelist/blacklist-driven persistence.

    The persistence exclude list is the configured blacklist plus every
    current instance attribute that is not explicitly whitelisted. If a
    saved state file exists, the context is restored from it and
    initialization is skipped; otherwise the parent initialize runs under
    the Zipline API context and the resulting context is stored.
    """
    whitelist = self._context_persistence_whitelist
    non_whitelisted = [attr for attr in self.__dict__
                       if attr not in whitelist]
    self._context_persistence_excludes = (
        self._context_persistence_blacklist + non_whitelisted
    )

    if os.path.isfile(self.state_filename):
        # Resume from persisted state instead of re-initializing.
        log.info("Loading state from {}".format(self.state_filename))
        load_context(self.state_filename,
                     context=self,
                     checksum=self.algo_filename)
        return

    with ZiplineAPI(self):
        super(self.__class__, self).initialize(*args, **kwargs)
        store_context(self.state_filename,
                      context=self,
                      checksum=self.algo_filename,
                      exclude_list=self._context_persistence_excludes)
def transform(self):
    """
    Main generator work loop.

    Drives the simulation clock, yielding capital-change packets as they
    occur, a daily message at each SESSION_END, a minute message at each
    MINUTE_END, and a final risk message after the clock is exhausted.
    """
    algo = self.algo
    metrics_tracker = algo.metrics_tracker
    emission_rate = metrics_tracker.emission_rate

    def every_bar(dt_to_use,
                  current_data=self.current_data,
                  handle_data=algo.event_manager.handle_data,
                  ):
        # Generator: yields capital-change packets before processing the bar.
        for capital_change in calculate_minute_capital_changes(dt_to_use):
            yield capital_change

        self.simulation_dt = dt_to_use
        # called every tick (minute or day).
        algo.on_dt_changed(dt_to_use)

        blotter = algo.blotter

        # handle any transactions and commissions coming out new orders
        # placed in the last bar
        new_transactions, new_commissions, closed_orders = blotter.get_transactions(
            current_data)

        blotter.prune_orders(closed_orders)

        for transaction in new_transactions:
            metrics_tracker.process_transaction(transaction)

            # since this order was modified, record it
            order = blotter.orders[transaction.order_id]
            metrics_tracker.process_order(order)

        for commission in new_commissions:
            metrics_tracker.process_commission(commission)

        # Run the user's handle_data for this bar.
        handle_data(algo, current_data, dt_to_use)

        # grab any new orders from the blotter, then clear the list.
        # this includes cancelled orders.
        new_orders = blotter.new_orders
        blotter.new_orders = []

        # if we have any new orders, record them so that we know
        # in what perf period they were placed.
        for new_order in new_orders:
            metrics_tracker.process_order(new_order)

    def once_a_day(midnight_dt,
                   current_data=self.current_data,
                   data_portal=self.data_portal):
        # process any capital changes that came overnight
        for capital_change in algo.calculate_capital_changes(
                midnight_dt, emission_rate=emission_rate,
                is_interday=True):
            yield capital_change

        # set all the timestamps
        self.simulation_dt = midnight_dt
        algo.on_dt_changed(midnight_dt)

        metrics_tracker.handle_market_open(
            midnight_dt,
            algo.data_portal,
        )

        # handle any splits that impact any positions or any open orders.
        assets_we_care_about = (
            metrics_tracker.positions.keys()
            | algo.blotter.open_orders.keys()
        )

        if assets_we_care_about:
            splits = data_portal.get_splits(assets_we_care_about,
                                            midnight_dt)
            if splits:
                algo.blotter.process_splits(splits)
                metrics_tracker.handle_splits(splits)

    def on_exit():
        # Remove references to algo, data portal, et al to break cycles
        # and ensure deterministic cleanup of these objects when the
        # simulation finishes.
        self.algo = None
        self.benchmark_source = self.current_data = self.data_portal = None

    with ExitStack() as stack:
        # on_exit runs last (callbacks fire LIFO after contexts exit).
        stack.callback(on_exit)
        stack.enter_context(self.processor)
        stack.enter_context(ZiplineAPI(self.algo))

        if algo.data_frequency == "minute":
            def execute_order_cancellation_policy():
                algo.blotter.execute_cancel_policy(SESSION_END)

            def calculate_minute_capital_changes(dt):
                # process any capital changes that came between the last
                # and current minutes
                return algo.calculate_capital_changes(
                    dt, emission_rate=emission_rate, is_interday=False)
        else:
            # Daily mode: no intra-day cancellation or capital changes.
            def execute_order_cancellation_policy():
                pass

            def calculate_minute_capital_changes(dt):
                return []

    # Dispatch each clock event to the matching handler.
        for dt, action in self.clock:
            if action == BAR:
                for capital_change_packet in every_bar(dt):
                    yield capital_change_packet
            elif action == SESSION_START:
                for capital_change_packet in once_a_day(dt):
                    yield capital_change_packet
            elif action == SESSION_END:
                # End of the session.
                positions = metrics_tracker.positions
                position_assets = algo.asset_finder.retrieve_all(positions)
                self._cleanup_expired_assets(dt, position_assets)

                execute_order_cancellation_policy()
                algo.validate_account_controls()

                yield self._get_daily_message(dt, algo, metrics_tracker)
            elif action == BEFORE_TRADING_START_BAR:
                self.simulation_dt = dt
                algo.on_dt_changed(dt)
                algo.before_trading_start(self.current_data)
            elif action == MINUTE_END:
                minute_msg = self._get_minute_message(
                    dt,
                    algo,
                    metrics_tracker,
                )

                yield minute_msg

    risk_message = metrics_tracker.handle_simulation_end(
        self.data_portal,
    )
    yield risk_message
def run(self, source, overwrite_sim_params=True, benchmark_return_source=None):
    """Run the algorithm.

    :Arguments:
        source : can be either:
                 - pandas.DataFrame
                 - zipline source
                 - list of sources

        If pandas.DataFrame is provided, it must have the
        following structure:
        * column names must consist of ints representing the
          different sids
        * index must be DatetimeIndex
        * array contents should be price info.

    :Returns:
        daily_stats : pandas.DataFrame
          Daily performance metrics such as returns, alpha etc.
    """
    if isinstance(source, list):
        # A list of sources carries no aggregate metadata we can trust,
        # so keep sim_params exactly as the caller configured it.
        if overwrite_sim_params:
            warnings.warn("""List of sources passed, will not attempt to extract sids, and start and end dates. Make sure to set the correct fields in sim_params passed to __init__().""", UserWarning)
            overwrite_sim_params = False
    elif isinstance(source, pd.DataFrame):
        # if DataFrame provided, wrap in DataFrameSource
        source = DataFrameSource(source)
    elif isinstance(source, pd.Panel):
        source = DataPanelSource(source)

    self.set_sources(source if isinstance(source, list) else [source])

    # Override sim_params if params are provided by the source.
    if overwrite_sim_params:
        if hasattr(source, 'start'):
            self.sim_params.period_start = source.start
        if hasattr(source, 'end'):
            self.sim_params.period_end = source.end
        self.sim_params.sids = {sid
                                for s in self.sources
                                for sid in s.sids}
        # Changing period_start and period_close might require updating
        # of first_open and last_close.
        self.sim_params._update_internal()

    # Create history containers
    if self.history_specs:
        self.history_container = HistoryContainer(
            self.history_specs,
            self.sim_params.sids,
            self.sim_params.first_open)

    # Wrap each registered transform into a StatefulTransform.
    self.transforms = []
    for name, descr in iteritems(self.registered_transforms):
        stateful = StatefulTransform(
            descr['class'],
            *descr['args'],
            **descr['kwargs']
        )
        stateful.namestring = name
        self.transforms.append(stateful)

    # Drop any tracker left over from a previous run of this algorithm.
    self.perf_tracker = None

    # Build the simulation generator.
    self.gen = self._create_generator(self.sim_params)

    with ZiplineAPI(self):
        # Drain the generator; each item is one period's perf packet.
        perfs = list(self.gen)

        # Convert the perf packets to a daily stats DataFrame.
        daily_stats = self._create_daily_stats(perfs)

        self.analyze(daily_stats)

    return daily_stats
def analyze(self, perf):
    """
    Run the user's analyze hook on the performance results, if one was
    provided; otherwise do nothing.
    """
    analyze_hook = self._analyze
    if analyze_hook is None:
        # No user analyze function was supplied.
        return
    with ZiplineAPI(self):
        analyze_hook(self, perf)
def transform(self):
    """
    Main generator work loop.

    Iterates the simulation clock, processing each BAR, handling
    day-rollover work at DAY_START, and yielding a daily message at
    DAY_END (plus minute/daily messages at MINUTE_END). Yields a final
    risk message after the clock is exhausted.
    """
    algo = self.algo
    algo.data_portal = self.data_portal
    handle_data = algo.event_manager.handle_data
    current_data = self.current_data
    data_portal = self.data_portal

    # can't cache a pointer to algo.perf_tracker because we're not
    # guaranteed that the algo doesn't swap out perf trackers during
    # its lifetime.
    # likewise, we can't cache a pointer to the blotter.

    algo.perf_tracker.position_tracker.data_portal = data_portal

    def every_bar(dt_to_use):
        # called every tick (minute or day).
        self.simulation_dt = dt_to_use
        algo.on_dt_changed(dt_to_use)

        blotter = algo.blotter
        perf_tracker = algo.perf_tracker

        # handle any transactions and commissions coming out new orders
        # placed in the last bar
        new_transactions, new_commissions = \
            blotter.get_transactions(current_data)

        for transaction in new_transactions:
            perf_tracker.process_transaction(transaction)

            # since this order was modified, record it
            order = blotter.orders[transaction.order_id]
            perf_tracker.process_order(order)

        if new_commissions:
            for commission in new_commissions:
                perf_tracker.process_commission(commission)

        # Run the user's handle_data for this bar.
        handle_data(algo, current_data, dt_to_use)

        # grab any new orders from the blotter, then clear the list.
        # this includes cancelled orders.
        new_orders = blotter.new_orders
        blotter.new_orders = []

        # if we have any new orders, record them so that we know
        # in what perf period they were placed.
        if new_orders:
            for new_order in new_orders:
                perf_tracker.process_order(new_order)

        # Mark cached algo views stale so they are refreshed on next access.
        self.algo.portfolio_needs_update = True
        self.algo.account_needs_update = True
        self.algo.performance_needs_update = True

    def once_a_day(midnight_dt):
        # Get the positions before updating the date so that prices are
        # fetched for trading close instead of midnight
        positions = algo.perf_tracker.position_tracker.positions
        position_assets = algo.asset_finder.retrieve_all(positions)

        # set all the timestamps
        self.simulation_dt = midnight_dt
        algo.on_dt_changed(midnight_dt)

        # we want to wait until the clock rolls over to the next day
        # before cleaning up expired assets.
        self._cleanup_expired_assets(midnight_dt, position_assets)

        perf_tracker = algo.perf_tracker

        # handle any splits that impact any positions or any open orders.
        assets_we_care_about = \
            viewkeys(perf_tracker.position_tracker.positions) | \
            viewkeys(algo.blotter.open_orders)

        if assets_we_care_about:
            splits = data_portal.get_splits(assets_we_care_about,
                                            midnight_dt)
            if splits:
                algo.blotter.process_splits(splits)
                perf_tracker.position_tracker.handle_splits(splits)

        # call before trading start
        algo.before_trading_start(current_data)

    def handle_benchmark(date):
        # Record the benchmark return for this date on the perf tracker.
        algo.perf_tracker.all_benchmark_returns[date] = \
            self.benchmark_source.get_value(date)

    with ExitStack() as stack:
        stack.enter_context(self.processor)
        stack.enter_context(ZiplineAPI(self.algo))

        if algo.data_frequency == 'minute':
            def execute_order_cancellation_policy():
                algo.blotter.execute_cancel_policy(DAY_END)
        else:
            # Daily mode: no end-of-day order cancellation.
            def execute_order_cancellation_policy():
                pass

        # Dispatch each clock event to the matching handler.
        for dt, action in self.clock:
            if action == BAR:
                every_bar(dt)
            elif action == DAY_START:
                once_a_day(dt)
            elif action == DAY_END:
                # End of the day.
                execute_order_cancellation_policy()
                handle_benchmark(normalize_date(dt))

                yield self._get_daily_message(dt, algo, algo.perf_tracker)
            elif action == MINUTE_END:
                handle_benchmark(dt)
                minute_msg, daily_msg = \
                    self._get_minute_message(dt, algo, algo.perf_tracker)

                yield minute_msg

                if daily_msg:
                    yield daily_msg

    risk_message = algo.perf_tracker.handle_simulation_end()
    yield risk_message
def transform(self):
    """
    Main generator work loop.

    Iterates the simulation clock, yielding capital-change packets as
    they occur, a daily message at each SESSION_END, minute messages at
    each MINUTE_END, and a final risk message at simulation end.
    """
    algo = self.algo
    emission_rate = algo.perf_tracker.emission_rate

    def every_bar(dt_to_use,
                  current_data=self.current_data,
                  handle_data=algo.event_manager.handle_data):
        # called every tick (minute or day).
        # NOTE(review): on_dt_changed fires before simulation_dt is
        # assigned below — confirm this ordering is intentional.
        algo.on_dt_changed(dt_to_use)

        # Yield any capital changes that occurred since the last bar.
        for capital_change in calculate_minute_capital_changes(dt_to_use):
            yield capital_change

        self.simulation_dt = dt_to_use

        blotter = algo.blotter
        perf_tracker = algo.perf_tracker

        # handle any transactions and commissions coming out new orders
        # placed in the last bar
        new_transactions, new_commissions, closed_orders = \
            blotter.get_transactions(current_data)

        blotter.prune_orders(closed_orders)

        for transaction in new_transactions:
            perf_tracker.process_transaction(transaction)

            # since this order was modified, record it
            order = blotter.orders[transaction.order_id]
            perf_tracker.process_order(order)

        if new_commissions:
            for commission in new_commissions:
                perf_tracker.process_commission(commission)

        # Run the user's handle_data for this bar.
        handle_data(algo, current_data, dt_to_use)

        # grab any new orders from the blotter, then clear the list.
        # this includes cancelled orders.
        new_orders = blotter.new_orders
        blotter.new_orders = []

        # if we have any new orders, record them so that we know
        # in what perf period they were placed.
        if new_orders:
            for new_order in new_orders:
                perf_tracker.process_order(new_order)

        # Mark cached algo views stale so they are refreshed on next access.
        algo.portfolio_needs_update = True
        algo.account_needs_update = True
        algo.performance_needs_update = True

    def once_a_day(midnight_dt,
                   current_data=self.current_data,
                   data_portal=self.data_portal):
        perf_tracker = algo.perf_tracker

        # Get the positions before updating the date so that prices are
        # fetched for trading close instead of midnight
        positions = algo.perf_tracker.position_tracker.positions
        position_assets = algo.asset_finder.retrieve_all(positions)

        # set all the timestamps
        self.simulation_dt = midnight_dt
        algo.on_dt_changed(midnight_dt)

        # process any capital changes that came overnight
        for capital_change in algo.calculate_capital_changes(
                midnight_dt, emission_rate=emission_rate,
                is_interday=True):
            yield capital_change

        # we want to wait until the clock rolls over to the next day
        # before cleaning up expired assets.
        self._cleanup_expired_assets(midnight_dt, position_assets)

        # handle any splits that impact any positions or any open orders.
        assets_we_care_about = \
            viewkeys(perf_tracker.position_tracker.positions) | \
            viewkeys(algo.blotter.open_orders)

        if assets_we_care_about:
            splits = data_portal.get_splits(assets_we_care_about,
                                            midnight_dt)
            if splits:
                algo.blotter.process_splits(splits)
                perf_tracker.position_tracker.handle_splits(splits)

    def handle_benchmark(date, benchmark_source=self.benchmark_source):
        try:
            algo.perf_tracker.all_benchmark_returns[date] = \
                benchmark_source.get_value(date)
        except Exception as e:
            # NOTE(review): any benchmark lookup failure is silently
            # swallowed, leaving no return recorded for this date —
            # confirm this best-effort behavior is intended.
            pass

    def on_exit():
        # Remove references to algo, data portal, et al to break cycles
        # and ensure deterministic cleanup of these objects when the
        # simulation finishes.
        self.algo = None
        self.benchmark_source = self.current_data = self.data_portal = None

    with ExitStack() as stack:
        # on_exit runs last (callbacks fire LIFO after contexts exit).
        stack.callback(on_exit)
        stack.enter_context(self.processor)
        stack.enter_context(ZiplineAPI(self.algo))

        if algo.data_frequency == 'minute':
            def execute_order_cancellation_policy():
                algo.blotter.execute_cancel_policy(SESSION_END)

            def calculate_minute_capital_changes(dt):
                # process any capital changes that came between the last
                # and current minutes
                return algo.calculate_capital_changes(
                    dt, emission_rate=emission_rate, is_interday=False)
        else:
            # Daily mode: no intra-day cancellation or capital changes.
            def execute_order_cancellation_policy():
                pass

            def calculate_minute_capital_changes(dt):
                return []

        # Dispatch each clock event to the matching handler.
        for dt, action in self.clock:
            if action == BAR:
                for capital_change_packet in every_bar(dt):
                    yield capital_change_packet
            elif action == SESSION_START:
                for capital_change_packet in once_a_day(dt):
                    yield capital_change_packet
            elif action == SESSION_END:
                # End of the session.
                if emission_rate == 'daily':
                    handle_benchmark(normalize_date(dt))
                execute_order_cancellation_policy()

                yield self._get_daily_message(dt, algo, algo.perf_tracker)
            elif action == BEFORE_TRADING_START_BAR:
                self.simulation_dt = dt
                algo.on_dt_changed(dt)
                algo.before_trading_start(self.current_data)
            elif action == MINUTE_END:
                handle_benchmark(dt)
                minute_msg = \
                    self._get_minute_message(dt, algo, algo.perf_tracker)

                yield minute_msg

    risk_message = algo.perf_tracker.handle_simulation_end()
    yield risk_message
def run(self, source, sim_params=None, benchmark_return_source=None):
    """Run the algorithm.

    :Arguments:
        source : can be either:
                 - pandas.DataFrame
                 - zipline source
                 - list of zipline sources

        If pandas.DataFrame is provided, it must have the
        following structure:
        * column names must consist of ints representing the
          different sids
        * index must be DatetimeIndex
        * array contents should be price info.

    :Returns:
        daily_stats : pandas.DataFrame
          Daily performance metrics such as returns, alpha etc.
    """
    if isinstance(source, (list, tuple)):
        # Multiple sources give us nothing to derive sim params from, so
        # they must already be available.
        assert self.sim_params is not None or sim_params is not None, \
            """When providing a list of sources, \
sim_params have to be specified as a parameter or in the constructor."""
    elif isinstance(source, pd.DataFrame):
        # if DataFrame provided, wrap in DataFrameSource
        source = DataFrameSource(source)
    elif isinstance(source, pd.Panel):
        source = DataPanelSource(source)

    if isinstance(source, (list, tuple)):
        self.sources = source
    else:
        self.sources = [source]

    # Check for override of sim_params.
    # If it isn't passed to this function,
    # use the default params set with the algorithm.
    # Else, we create simulation parameters using the start and end of the
    # source provided.
    if sim_params is None:
        if self.sim_params is None:
            # Derive the simulation window from the single source.
            sim_params = create_simulation_parameters(
                start=source.start,
                end=source.end,
                capital_base=self.capital_base,
            )
        else:
            sim_params = self.sim_params

    # update sim params to ensure it's set
    self.sim_params = sim_params
    if self.sim_params.sids is None:
        # Collect the union of sids across all sources.
        self.sim_params.sids = {sid
                                for s in self.sources
                                for sid in s.sids}

    # Create history containers
    if self.history_specs:
        self.history_container = HistoryContainer(
            self.history_specs,
            self.sim_params.sids,
            self.sim_params.first_open)

    # Wrap each registered transform into a StatefulTransform.
    self.transforms = []
    for name, descr in iteritems(self.registered_transforms):
        stateful = StatefulTransform(descr['class'],
                                     *descr['args'],
                                     **descr['kwargs'])
        stateful.namestring = name
        self.transforms.append(stateful)

    # Drop any tracker left over from a previous run of this algorithm.
    self.perf_tracker = None

    # Build the simulation generator.
    self.gen = self._create_generator(sim_params)

    with ZiplineAPI(self):
        # Drain the generator; each item is one period's perf packet.
        perfs = list(self.gen)

        # Convert the perf packets to a daily stats DataFrame.
        daily_stats = self._create_daily_stats(perfs)

        self.analyze(daily_stats)

    return daily_stats
def run(self, source, overwrite_sim_params=True, benchmark_return_source=None):
    """Run the algorithm.

    :Arguments:
        source : can be either:
                 - pandas.DataFrame
                 - zipline source
                 - list of sources

        If pandas.DataFrame is provided, it must have the
        following structure:
        * column names must be the different asset identifiers
        * index must be DatetimeIndex
        * array contents should be price info.

    :Returns:
        daily_stats : pandas.DataFrame
          Daily performance metrics such as returns, alpha etc.
    """
    # Ensure that source is a DataSource object
    if isinstance(source, list):
        # A list of sources carries no aggregate metadata we can trust,
        # so keep sim_params exactly as the caller configured it.
        if overwrite_sim_params:
            warnings.warn(
                """List of sources passed, will not attempt to extract start and end dates. Make sure to set the correct fields in sim_params passed to __init__().""", UserWarning)
            overwrite_sim_params = False
    elif isinstance(source, pd.DataFrame):
        # if DataFrame provided, wrap in DataFrameSource
        source = DataFrameSource(source)
    elif isinstance(source, pd.Panel):
        source = DataPanelSource(source)

    self.set_sources(source if isinstance(source, list) else [source])

    # Override sim_params if params are provided by the source.
    if overwrite_sim_params:
        if hasattr(source, 'start'):
            self.sim_params.period_start = source.start
        if hasattr(source, 'end'):
            self.sim_params.period_end = source.end
        # The sids field of the source is the canonical reference for
        # sids in this run
        self.sim_params.sids = {sid
                                for s in self.sources
                                for sid in s.sids}
        # Check that all sids from the source are accounted for in
        # the AssetFinder
        for sid in self.sim_params.sids:
            try:
                self.asset_finder.retrieve_asset(sid)
            except SidNotFound:
                warnings.warn("No Asset found for sid '%s'. Make sure "
                              "that the correct identifiers and asset "
                              "metadata are passed to __init__()." % sid)
        # Changing period_start and period_close might require updating
        # of first_open and last_close.
        self.sim_params._update_internal()

    # Drop any tracker left over from a previous run of this algorithm.
    self.perf_tracker = None

    # Build the simulation generator.
    self.gen = self._create_generator(self.sim_params)

    # Create history containers
    if self.history_specs:
        self.history_container = self.history_container_class(
            self.history_specs,
            self.sim_params.sids,
            self.sim_params.first_open,
            self.sim_params.data_frequency,
        )

    with ZiplineAPI(self):
        # Drain the generator; each item is one period's perf packet.
        perfs = list(self.gen)

        # Convert the perf packets to a daily stats DataFrame.
        daily_stats = self._create_daily_stats(perfs)

        self.analyze(daily_stats)

    return daily_stats
def transform(self, stream_in):
    """
    Main generator work loop.

    Consumes (date, snapshot) pairs from ``stream_in``. During the
    warmup period (before ``self.algo_start``) events only update the
    universe and perf tracker; afterwards each snapshot is processed and
    any resulting perf messages are yielded. A final risk message is
    yielded once the stream is exhausted.
    """
    # Initialize the mkt_close
    mkt_open = self.algo.perf_tracker.market_open
    mkt_close = self.algo.perf_tracker.market_close

    # inject the current algo
    # snapshot time to any log record generated.
    with ExitStack() as stack:
        stack.enter_context(self.processor)
        stack.enter_context(ZiplineAPI(self.algo))

        data_frequency = self.sim_params.data_frequency

        self._call_before_trading_start(mkt_open)

        for date, snapshot in stream_in:

            self.simulation_dt = date
            self.on_dt_changed(date)

            # If we're still in the warmup period.  Use the event to
            # update our universe, but don't yield any perf messages,
            # and don't send a snapshot to handle_data.
            if date < self.algo_start:
                for event in snapshot:
                    if event.type == DATASOURCE_TYPE.SPLIT:
                        self.algo.blotter.process_split(event)

                    elif event.type == DATASOURCE_TYPE.TRADE:
                        self.update_universe(event)
                        self.algo.perf_tracker.process_trade(event)
                    elif event.type == DATASOURCE_TYPE.CUSTOM:
                        self.update_universe(event)

            else:
                messages = self._process_snapshot(
                    date,
                    snapshot,
                    self.algo.instant_fill,
                )
                # Perf messages are only emitted if the snapshot contained
                # a benchmark event.
                for message in messages:
                    yield message

                # When emitting minutely, we need to call
                # before_trading_start before the next trading day begins
                if date == mkt_close:
                    if mkt_close <= self.algo.perf_tracker.last_close:
                        before_last_close = \
                            mkt_close < self.algo.perf_tracker.last_close
                        try:
                            # Advance to the next session's open/close.
                            mkt_open, mkt_close = \
                                self.env.next_open_and_close(mkt_close)
                        except NoFurtherDataError:
                            # If at the end of backtest history,
                            # skip advancing market close.
                            pass

                        if before_last_close:
                            self._call_before_trading_start(mkt_open)

                elif data_frequency == 'daily':
                    next_day = self.env.next_trading_day(date)

                    if next_day is not None and \
                       next_day < self.algo.perf_tracker.last_close:
                        self._call_before_trading_start(next_day)

                # Mark cached algo views stale so they are refreshed on
                # next access.
                self.algo.portfolio_needs_update = True
                self.algo.account_needs_update = True
                self.algo.performance_needs_update = True

    risk_message = self.algo.perf_tracker.handle_simulation_end()
    yield risk_message
def transform(self):
    """
    Main generator work loop.

    Iterates the simulation clock, processing bars (including any
    scheduled capital changes from ``algo.capital_changes``), day
    rollovers, and yielding daily/minute perf messages plus a final
    risk message at simulation end.
    """
    algo = self.algo

    def every_bar(dt_to_use,
                  current_data=self.current_data,
                  handle_data=algo.event_manager.handle_data):
        # called every tick (minute or day).
        if dt_to_use in algo.capital_changes:
            process_minute_capital_changes(dt_to_use)

        self.simulation_dt = dt_to_use
        algo.on_dt_changed(dt_to_use)

        blotter = algo.blotter
        perf_tracker = algo.perf_tracker

        # handle any transactions and commissions coming out new orders
        # placed in the last bar
        new_transactions, new_commissions, closed_orders = \
            blotter.get_transactions(current_data)

        blotter.prune_orders(closed_orders)

        for transaction in new_transactions:
            perf_tracker.process_transaction(transaction)

            # since this order was modified, record it
            order = blotter.orders[transaction.order_id]
            perf_tracker.process_order(order)

        if new_commissions:
            for commission in new_commissions:
                perf_tracker.process_commission(commission)

        # Run the user's handle_data for this bar.
        handle_data(algo, current_data, dt_to_use)

        # grab any new orders from the blotter, then clear the list.
        # this includes cancelled orders.
        new_orders = blotter.new_orders
        blotter.new_orders = []

        # if we have any new orders, record them so that we know
        # in what perf period they were placed.
        if new_orders:
            for new_order in new_orders:
                perf_tracker.process_order(new_order)

        # Mark cached algo views stale so they are refreshed on next access.
        algo.portfolio_needs_update = True
        algo.account_needs_update = True
        algo.performance_needs_update = True

    def once_a_day(midnight_dt,
                   current_data=self.current_data,
                   data_portal=self.data_portal):
        perf_tracker = algo.perf_tracker

        if midnight_dt in algo.capital_changes:
            # process any capital changes that came overnight
            change = algo.capital_changes[midnight_dt]
            log.info('Processing capital change of %s at %s' %
                     (change, midnight_dt))
            perf_tracker.process_capital_changes(change, is_interday=True)

        # Get the positions before updating the date so that prices are
        # fetched for trading close instead of midnight
        positions = algo.perf_tracker.position_tracker.positions
        position_assets = algo.asset_finder.retrieve_all(positions)

        # set all the timestamps
        self.simulation_dt = midnight_dt
        algo.on_dt_changed(midnight_dt)

        # we want to wait until the clock rolls over to the next day
        # before cleaning up expired assets.
        self._cleanup_expired_assets(midnight_dt, position_assets)

        # handle any splits that impact any positions or any open orders.
        assets_we_care_about = \
            viewkeys(perf_tracker.position_tracker.positions) | \
            viewkeys(algo.blotter.open_orders)

        if assets_we_care_about:
            splits = data_portal.get_splits(assets_we_care_about,
                                            midnight_dt)
            if splits:
                algo.blotter.process_splits(splits)
                perf_tracker.position_tracker.handle_splits(splits)

        # call before trading start
        algo.before_trading_start(current_data)

    def handle_benchmark(date, benchmark_source=self.benchmark_source):
        # Record the benchmark return for this date on the perf tracker.
        algo.perf_tracker.all_benchmark_returns[date] = \
            benchmark_source.get_value(date)

    def on_exit():
        # Remove references to algo, data portal, et al to break cycles
        # and ensure deterministic cleanup of these objects when the
        # simulation finishes.
        self.algo = None
        self.benchmark_source = self.current_data = self.data_portal = None

    with ExitStack() as stack:
        # on_exit runs last (callbacks fire LIFO after contexts exit).
        stack.callback(on_exit)
        stack.enter_context(self.processor)
        stack.enter_context(ZiplineAPI(self.algo))

        if algo.data_frequency == 'minute':
            def execute_order_cancellation_policy():
                algo.blotter.execute_cancel_policy(DAY_END)

            def process_minute_capital_changes(dt):
                # If we are running daily emission, prices won't
                # necessarily be synced at the end of every minute, and we
                # need the up-to-date prices for capital change
                # calculations. We want to sync the prices as of the
                # last market minute, and this is okay from a data portal
                # perspective as we have technically not "advanced" to the
                # current dt yet.
                algo.perf_tracker.position_tracker.sync_last_sale_prices(
                    self.env.previous_market_minute(dt),
                    False,
                    self.data_portal
                )

                # process any capital changes that came between the last
                # and current minutes
                change = algo.capital_changes[dt]
                log.info('Processing capital change of %s at %s' %
                         (change, dt))
                algo.perf_tracker.process_capital_changes(
                    change,
                    is_interday=False
                )
        else:
            # Daily mode: no end-of-day cancellation or intra-day
            # capital-change processing.
            def execute_order_cancellation_policy():
                pass

            def process_minute_capital_changes(dt):
                pass

        # Dispatch each clock event to the matching handler.
        for dt, action in self.clock:
            if action == BAR:
                every_bar(dt)
            elif action == DAY_START:
                once_a_day(dt)
            elif action == DAY_END:
                # End of the day.
                if algo.perf_tracker.emission_rate == 'daily':
                    handle_benchmark(normalize_date(dt))
                execute_order_cancellation_policy()

                yield self._get_daily_message(dt, algo, algo.perf_tracker)
            elif action == MINUTE_END:
                handle_benchmark(dt)
                minute_msg = \
                    self._get_minute_message(dt, algo, algo.perf_tracker)

                yield minute_msg

    risk_message = algo.perf_tracker.handle_simulation_end()
    yield risk_message
def transform(self, stream_in):
    """
    Main generator work loop.

    Consumes (date, snapshot) pairs from ``stream_in``. During the
    warmup period (before ``self.algo_start``) events only update the
    universe, perf tracker, and history container; afterwards each
    snapshot is processed and any resulting perf messages are yielded.
    A final risk message is yielded once the stream is exhausted.
    """
    # Initialize the mkt_close
    mkt_open = self.algo.perf_tracker.market_open
    mkt_close = self.algo.perf_tracker.market_close

    # inject the current algo
    # snapshot time to any log record generated.
    # ExitStack lets us enter multiple context managers (open/close
    # pairs) through a single `with` block.
    with ExitStack() as stack:
        stack.enter_context(self.processor)
        stack.enter_context(ZiplineAPI(self.algo))

        data_frequency = self.sim_params.data_frequency

        self._call_before_trading_start(mkt_open)

        # Main loop: iterate date by date. Each snapshot is the data
        # bundle for that date (timestamps, security events, etc.).
        for date, snapshot in stream_in:
            self.simulation_dt = date  # current simulation date
            self.on_dt_changed(date)

            # If we're still in the warmup period. Use the event to
            # update our universe, but don't yield any perf messages,
            # and don't send a snapshot to handle_data.
            if date < self.algo_start:
                for event in snapshot:
                    if event.type == DATASOURCE_TYPE.SPLIT:
                        self.algo.blotter.process_split(event)

                    elif event.type == DATASOURCE_TYPE.TRADE:
                        self.update_universe(event)
                        self.algo.perf_tracker.process_trade(event)
                    elif event.type == DATASOURCE_TYPE.CUSTOM:
                        self.update_universe(event)

                # Keep the history container warm during warmup so that
                # history() has data when the algo proper starts.
                if self.algo.history_container:
                    self.algo.history_container.update(
                        self.current_data, date)

            else:
                # Process the day's snapshot through the simulator.
                messages = self._process_snapshot(
                    date,
                    snapshot,
                    self.algo.instant_fill,
                )
                # Perf messages are only emitted if the snapshot contained
                # a benchmark event.
                for message in messages:
                    yield message

                # When emitting minutely, we need to call
                # before_trading_start before the next trading day begins
                if date == mkt_close:
                    if mkt_close <= self.algo.perf_tracker.last_close:
                        before_last_close = \
                            mkt_close < self.algo.perf_tracker.last_close
                        try:
                            # Advance to the next session's open/close.
                            mkt_open, mkt_close = \
                                self.env.next_open_and_close(mkt_close)
                        except NoFurtherDataError:
                            # If at the end of backtest history,
                            # skip advancing market close.
                            pass

                        if before_last_close:
                            self._call_before_trading_start(mkt_open)

                elif data_frequency == 'daily':
                    # If there is a next trading day and it falls before
                    # the tracker's last close, notify the algo of the
                    # upcoming session.
                    next_day = self.env.next_trading_day(date)

                    if next_day is not None and \
                       next_day < self.algo.perf_tracker.last_close:
                        self._call_before_trading_start(
                            next_day
                        )

                # Mark cached algo views stale so they are refreshed on
                # next access.
                self.algo.portfolio_needs_update = True
                self.algo.account_needs_update = True
                self.algo.performance_needs_update = True

    risk_message = self.algo.perf_tracker.handle_simulation_end()
    yield risk_message