def __init__(self, blotter, perf_tracker, algo, algo_start):

    # ==========
    # Algo Setup
    # ==========

    # We extract the order book from the txn client so that
    # the algo can place new orders.
    self.blotter = blotter
    self.perf_tracker = perf_tracker
    self.perf_key = self.EMISSION_TO_PERF_KEY_MAP[
        perf_tracker.emission_rate]

    self.algo = algo
    self.algo_start = algo_start.replace(hour=0, minute=0,
                                         second=0, microsecond=0)

    # Monkey patch the user algorithm to place orders in the
    # TransactionSimulator's order book and use our logger.
    self.algo.set_order(self.order)

    # ==============
    # Snapshot Setup
    # ==============

    # The algorithm's universe as of our most recent event.
    # We want an ndict that will have empty objects as default
    # values on missing keys.
    self.universe = ndict(internal=defaultdict(SIDData))

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None
    self.snapshot_dt = None

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.snapshot_dt
    self.processor = Processor(inject_algo_dt)
def __init__(self, algo, sim_params):

    # ==============
    # Simulation
    # Param Setup
    # ==============
    self.sim_params = sim_params

    # ==============
    # Algo Setup
    # ==============
    self.algo = algo
    self.algo_start = self.sim_params.first_open
    self.algo_start = self.algo_start.replace(hour=0, minute=0,
                                              second=0, microsecond=0)

    # ==============
    # Snapshot Setup
    # ==============

    # The algorithm's data as of our most recent event.
    # We want an object that will have empty objects as default
    # values on missing keys.
    self.current_data = BarData()

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.simulation_dt
    self.processor = Processor(inject_algo_dt)
def _bootstrap():
    """Get the Inyoka version and store it."""
    global INYOKA_REVISION

    # the path to the contents of the Inyoka module
    conts = os.environ.setdefault('INYOKA_MODULE',
                                  realpath(join(dirname(__file__))))
    # the path to the Inyoka instance folder
    os.environ['INYOKA_INSTANCE'] = realpath(join(conts, pardir))
    os.environ['CELERY_LOADER'] = 'inyoka.core.celery_support.CeleryLoader'

    # get the `INYOKA_REVISION` using the mercurial python api
    try:
        ui = hgui.ui()
        repository = localrepository(ui, join(conts, '..'))
        ctx = repository['tip']
        INYOKA_REVISION = ('%(num)s:%(id)s' % {
            'num': ctx.rev(), 'id': shorthex(ctx.node())
        })
    except TypeError:
        # fail silently
        pass

    # This value defines the timeout for sockets in seconds.  By default
    # Python sockets never time out, which would leave us with blocking
    # workers.  Socket timeouts are set globally within the whole
    # application.  The value *must* be a floating point value.
    socket.setdefaulttimeout(10.0)

    #: bind the context
    ctx = ApplicationContext()
    ctx.bind()

    # setup components
    ctx.load_packages(ctx.cfg['activated_components'])

    if ctx.cfg['testing']:
        logger.level_name = 'ERROR'

    # make INYOKA_REVISION visible in the extra dict of every log record
    proc = Processor(lambda x: x.extra.update(INYOKA_REVISION=INYOKA_REVISION))
    proc.push_application()
async def task(task_name: str):
    def inject_extra(record):
        record.extra['task_name'] = task_name
        record.extra['task_id'] = id(asyncio.current_task())

    with Handler(bubble=True).contextbound():
        with Processor(inject_extra).contextbound():
            logger.info('I am the task')
            await asyncio.sleep(0)
            await util()
            logger.info('I am still the task')
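# A minimal driver sketch (an assumption for illustration, not part of the
# snippet above): because contextbound() pushes the Handler and Processor
# onto context-local stacks, concurrent tasks keep their own injected
# task_name/task_id.  Assumes `task`, `logger`, and `util` are defined as in
# the snippet's module.
import asyncio

async def main():
    await asyncio.gather(task('alpha'), task('beta'))

asyncio.run(main())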
def __init__(self, algo, sim_params, data_portal, clock, benchmark_source,
             restrictions, universe_func, instant_fill=False):

    # ==============
    # Simulation
    # Param Setup
    # ==============
    self.sim_params = sim_params
    self.env = algo.trading_environment
    self.data_portal = data_portal
    self.restrictions = restrictions
    self.instant_fill = instant_fill

    # ==============
    # Algo Setup
    # ==============
    self.algo = algo

    # ==============
    # Snapshot Setup
    # ==============

    # This object is the way that user algorithms interact with OHLCV data,
    # fetcher data, and some API methods like `data.can_trade`.
    self.current_data = self._create_bar_data(universe_func)

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None

    self.clock = clock

    self.benchmark_source = benchmark_source

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.simulation_dt
    self.processor = Processor(inject_algo_dt)
def test_mdc_works():
    def inject_extra(record):
        record.extra['ip'] = '127.0.0.1'
        record.extra['username'] = 'Andrey'

    with TestHandler() as handler:
        handler.formatter = text_formatter
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            with Processor(inject_extra):
                logger.info('hello world')

    assert len(handler.formatted_records) == 1
    assert ('INFO: Dummy: hello world <ip=127.0.0.1, username=Andrey>'
            in handler.formatted_records[0])
def _bootstrap():
    """Get the Inyoka version and store it."""
    global INYOKA_REVISION

    # the path to the contents of the Inyoka module
    conts = os.environ.setdefault("INYOKA_MODULE",
                                  realpath(join(dirname(__file__))))
    # the path to the Inyoka instance folder
    os.environ["INYOKA_INSTANCE"] = realpath(join(conts, pardir))
    os.environ["CELERY_LOADER"] = "inyoka.core.celery_support.CeleryLoader"

    # get the `INYOKA_REVISION` using the mercurial python api
    try:
        ui = hgui.ui()
        repository = localrepository(ui, join(conts, ".."))
        ctx = repository["tip"]
        INYOKA_REVISION = "%(num)s:%(id)s" % {
            "num": ctx.rev(), "id": shorthex(ctx.node())}
    except TypeError:
        # fail silently
        pass

    # This value defines the timeout for sockets in seconds.  By default
    # Python sockets never time out, which would leave us with blocking
    # workers.  Socket timeouts are set globally within the whole
    # application.  The value *must* be a floating point value.
    socket.setdefaulttimeout(10.0)

    #: bind the context
    ctx = ApplicationContext()
    ctx.bind()

    # setup components
    ctx.load_packages(ctx.cfg["activated_components"])

    if ctx.cfg["testing"]:
        logger.level_name = "ERROR"

    # make INYOKA_REVISION visible in the extra dict of every log record
    proc = Processor(lambda x: x.extra.update(INYOKA_REVISION=INYOKA_REVISION))
    proc.push_application()
def __init__(self, algo, sim_params):

    # ==============
    # Simulation
    # Param Setup
    # ==============
    self.sim_params = sim_params

    # ==============
    # Perf Tracker
    # Setup
    # ==============
    self.perf_tracker = PerformanceTracker(self.sim_params)
    self.perf_key = self.EMISSION_TO_PERF_KEY_MAP[
        self.perf_tracker.emission_rate]

    # ==============
    # Algo Setup
    # ==============
    self.algo = algo
    self.algo_start = self.sim_params.first_open
    self.algo_start = self.algo_start.replace(hour=0, minute=0,
                                              second=0, microsecond=0)

    # ==============
    # Snapshot Setup
    # ==============

    # The algorithm's data as of our most recent event.
    # We want an object that will have empty objects as default
    # values on missing keys.
    self.current_data = BarData()

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None
    self.snapshot_dt = None

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.snapshot_dt
    self.processor = Processor(inject_algo_dt)
def test_json_formatting_works():
    def inject_extra(record):
        record.extra['ip'] = '127.0.0.1'
        record.extra['username'] = 'Andrey'

    with TestHandler() as handler:
        handler.formatter = json_formatter
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            with Processor(inject_extra):
                logger.info('hello world')

    assert len(handler.formatted_records) == 1
    record = json.loads(handler.formatted_records[0])
    assert record['level'] == 'INFO'
    assert record['name'] == 'Dummy'
    assert record['message'] == 'hello world'
    assert record['ip'] == '127.0.0.1'
    assert record['username'] == 'Andrey'
def transact_stub(slippage, commission, event, open_orders):
    """
    This is intended to be wrapped in a partial, so that the
    slippage and commission models can be enclosed.
    """
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = event['dt']

    with Processor(inject_algo_dt).threadbound():
        transaction = slippage.simulate(event, open_orders)
        if transaction and transaction.amount != 0:
            direction = abs(transaction.amount) / transaction.amount
            per_share, total_commission = commission.calculate(transaction)
            transaction.price = transaction.price + (per_share * direction)
            transaction.commission = total_commission

    return transaction
def __init__(self, order_book, algo, algo_start):

    # ==========
    # Algo Setup
    # ==========

    # We extract the order book from the txn client so that
    # the algo can place new orders.
    self.order_book = order_book

    self.algo = algo
    self.algo_start = algo_start.replace(hour=0, minute=0,
                                         second=0, microsecond=0)

    # Monkey patch the user algorithm to place orders in the
    # TransactionSimulator's order book and use our logger.
    self.algo.set_order(self.order)

    # ==============
    # Snapshot Setup
    # ==============

    # The algorithm's universe as of our most recent event.
    # We want an ndict that will have empty ndicts as default
    # values on missing keys.
    self.universe = ndict(internal=defaultdict(ndict))

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None
    self.snapshot_dt = None

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        record.extra["algo_dt"] = self.snapshot_dt
    self.processor = Processor(inject_algo_dt)
def transact_stub(slippage, commission, event, open_orders):
    """
    This is intended to be wrapped in a partial, so that the
    slippage and commission models can be enclosed.
    """
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = event['dt']

    with Processor(inject_algo_dt).threadbound():
        transactions = slippage.simulate(event, open_orders)
        for transaction in transactions:
            if (transaction
                    and not zp_math.tolerant_equals(transaction.amount, 0)):
                direction = math.copysign(1, transaction.amount)
                per_share, total_commission = \
                    commission.calculate(transaction)
                transaction.price = transaction.price + \
                    (per_share * direction)
                transaction.commission = total_commission

    return transactions
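# The docstring above says transact_stub "is intended to be wrapped in a
# partial" -- a minimal usage sketch (assumed, not from the source):
# `my_slippage` and `my_commission` stand for hypothetical model objects
# exposing .simulate(event, open_orders) and .calculate(transaction).
from functools import partial

transact = partial(transact_stub, my_slippage, my_commission)

# Downstream code now only supplies the per-event arguments:
transactions = transact(event, open_orders)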
def __init__(self, algo, sim_params):

    # Performance Setup
    self.fast_backtest = sim_params.fast_backtest

    # ==============
    # Simulation
    # Param Setup
    # ==============
    self.sim_params = sim_params

    # ==============
    # Algo Setup
    # ==============
    self.algo = algo
    self.algo_start = normalize_date(self.sim_params.first_open)

    # ==============
    # Snapshot Setup
    # ==============

    # The algorithm's data as of our most recent event.
    # We want an object that will have empty objects as default
    # values on missing keys.
    self.current_data = BarData()

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.simulation_dt
    self.processor = Processor(inject_algo_dt)
def __init__(self, algo, sim_params):

    # ==============
    # Simulation
    # Param Setup
    # ==============
    self.sim_params = sim_params

    # ==============
    # Algo Setup
    # ==============
    self.algo = algo
    self.algo_start = normalize_date(self.sim_params.first_open)
    self.env = algo.trading_environment

    # ==============
    # Snapshot Setup
    # ==============

    # The algorithm's data as of our most recent event.
    # We want an object that will have empty objects as default
    # values on missing keys.
    self.current_data = BarData()

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None
    self.previous_dt = self.algo_start

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.simulation_dt
    self.processor = Processor(inject_algo_dt)
def run(self):
    def inject_worker_id(record):
        record.extra['worker_id'] = self.id

    with Processor(inject_worker_id).threadbound():
        iterations_left = NUM_ITERATIONS
        num_fails_in_sequence = 0

        while iterations_left > 0 and not self.exit_event.is_set():
            # acquire the locks
            locked = []
            for data_id in range(NUM_DATA_SLOTS):
                key = 'key-%d' % data_id
                if self.lock(key):
                    locked.append(key)
                else:
                    break

            if len(locked) == NUM_DATA_SLOTS:
                num_fails_in_sequence = 0
                for data_id in range(NUM_DATA_SLOTS):
                    self.d[data_id].append(self.id)
                iterations_left -= 1
                logger.info('decrement: locked=%r' % (locked, ))
            else:
                num_fails_in_sequence += 1
                if num_fails_in_sequence > \
                        NUM_ITERATIONS * NUM_DATA_SLOTS * NUM_WORKERS:
                    # the algorithm seems to be stuck somewhere;
                    # exit from the worker
                    logger.warning('the algorithm seems to be stuck somewhere')
                    break

            # release the locks
            for key in reversed(locked):
                self.unlock(key)

            time.sleep(random.random())

        logger.debug('exit from the worker, locals=%r' % (locals(), ))
class AlgorithmSimulator(object):

    def __init__(self, order_book, algo, algo_start):

        # ==========
        # Algo Setup
        # ==========

        # We extract the order book from the txn client so that
        # the algo can place new orders.
        self.order_book = order_book

        self.algo = algo
        self.algo_start = algo_start.replace(hour=0, minute=0,
                                             second=0, microsecond=0)

        # Monkey patch the user algorithm to place orders in the
        # TransactionSimulator's order book and use our logger.
        self.algo.set_order(self.order)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's universe as of our most recent event.
        # We want an ndict that will have empty ndicts as default
        # values on missing keys.
        self.universe = ndict(internal=defaultdict(ndict))

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None
        self.snapshot_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.snapshot_dt
        self.processor = Processor(inject_algo_dt)

    def order(self, sid, amount):
        """
        Closure to pass into the user's algo to allow placing orders
        into the transaction simulator's dict of open orders.
        """
        order = ndict({
            'dt': self.simulation_dt,
            'sid': sid,
            'amount': int(amount),
            'filled': 0
        })

        # Tell the user if they try to buy 0 shares of something.
        if order.amount == 0:
            zero_message = "Requested to trade zero shares of {sid}".format(
                sid=order.sid)
            log.debug(zero_message)
            # Don't bother placing orders for 0 shares.
            return

        # Add non-zero orders to the order book.
        # !!!IMPORTANT SIDE-EFFECT!!!
        # This modifies the internal state of the transaction
        # simulator so that it can fill the placed order when it
        # receives its next message.
        self.order_book.place_order(order)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Inject the current algo snapshot time into any log record
        # generated.
        with self.processor.threadbound():

            for date, snapshot in stream_in:
                # Set the simulation date to be the first event we see.
                # This should only occur once, at the start of the test.
                if self.simulation_dt is None:
                    self.simulation_dt = date

                # The done message carries the risk report, so we yield
                # before exiting.  Returning (rather than raising
                # StopIteration) ends the generator cleanly under PEP 479.
                if date == 'DONE':
                    for event in snapshot:
                        yield event.perf_message
                    return

                # We're still in the warmup period.  Use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                elif date < self.algo_start:
                    for event in snapshot:
                        del event['perf_message']
                        self.update_universe(event)

                # The algo has taken so long to process events that
                # its simulated time is later than the event time.
                # Update the universe and yield any perf messages
                # encountered, but don't call handle_data.
                elif date < self.simulation_dt:
                    for event in snapshot:
                        # Only yield if we have something interesting to say.
                        if event.perf_message is not None:
                            yield event.perf_message
                        # Delete the message before updating,
                        # so we don't send it to the user.
                        del event['perf_message']
                        self.update_universe(event)

                # Regular snapshot.  Update the universe and send a snapshot
                # to handle_data.
                else:
                    for event in snapshot:
                        # Only yield if we have something interesting to say.
                        if event.perf_message is not None:
                            yield event.perf_message
                        del event['perf_message']
                        self.update_universe(event)

                    # Send the current state of the universe
                    # to the user's algo.
                    self.simulate_snapshot(date)

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our portfolio.
        self.algo.set_portfolio(event.portfolio)
        # Update our knowledge of this event's sid
        for field in event.keys():
            self.universe[event.sid][field] = event[field]

    def simulate_snapshot(self, date):
        """
        Run the user's algo against our current snapshot and update
        the algo's simulated time.
        """
        # Needs to be set so that we inject the proper date into algo
        # log/print lines.
        self.snapshot_dt = date
        self.algo.set_datetime(self.snapshot_dt)
        start_tic = datetime.now()
        self.algo.handle_data(self.universe)
        stop_tic = datetime.now()
        # How long did the user's handle_data take?
        delta = stop_tic - start_tic
        # Update the simulation time.
        self.simulation_dt = date + delta
class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {
        'minute': 'intraday_perf',
        'daily': 'daily_perf'
    }

    def __init__(self, blotter, perf_tracker, algo, algo_start):

        # ==========
        # Algo Setup
        # ==========

        # We extract the order book from the txn client so that
        # the algo can place new orders.
        self.blotter = blotter
        self.perf_tracker = perf_tracker
        self.perf_key = self.EMISSION_TO_PERF_KEY_MAP[
            perf_tracker.emission_rate]

        self.algo = algo
        self.algo_start = algo_start.replace(hour=0, minute=0,
                                             second=0, microsecond=0)

        # Monkey patch the user algorithm to place orders in the
        # TransactionSimulator's order book and use our logger.
        self.algo.set_order(self.order)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's universe as of our most recent event.
        # We want an ndict that will have empty objects as default
        # values on missing keys.
        self.universe = ndict(internal=defaultdict(SIDData))

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None
        self.snapshot_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.snapshot_dt
        self.processor = Processor(inject_algo_dt)

    def order(self, sid, amount, limit_price=None, stop_price=None):
        # something could be done with amount to further divide
        # between buy by share count OR buy shares up to a dollar amount
        # numeric == share count AND "$dollar.cents" == cost amount
        """
        amount > 0 :: Buy/Cover
        amount < 0 :: Sell/Short
        Market order:    order(sid, amount)
        Limit order:     order(sid, amount, limit_price)
        Stop order:      order(sid, amount, None, stop_price)
        StopLimit order: order(sid, amount, limit_price, stop_price)
        """
        # just validates amount and passes rest on to TransactionSimulator
        # Tell the user if they try to buy 0 shares of something.
        if amount == 0:
            zero_message = "Requested to trade zero shares of {psid}".format(
                psid=sid
            )
            log.debug(zero_message)
            # Don't bother placing orders for 0 shares.
            return

        order = Order(**{
            'dt': self.simulation_dt,
            'sid': sid,
            'amount': int(amount),
            'filled': 0,
            'stop': stop_price,
            'limit': limit_price
        })

        # Add non-zero orders to the order book.
        # !!!IMPORTANT SIDE-EFFECT!!!
        # This modifies the internal state of the blotter
        # so that it can fill the placed order when it
        # receives its next message.
        self.blotter.place_order(order)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Set the simulation date to be the first event we see.
        peek_date, peek_snapshot = next(stream_in)
        self.simulation_dt = peek_date

        # Stitch back together the generator by placing the peeked
        # event back in front.
        stream = itertools.chain([(peek_date, peek_snapshot)],
                                 stream_in)

        # Inject the current algo snapshot time into any log record
        # generated.
        with self.processor.threadbound():

            for date, snapshot in stream:
                # We're still in the warmup period.  Use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        del event['perf_messages']
                        self.update_universe(event)

                # Regular snapshot.  Update the universe and send a snapshot
                # to handle_data.
                else:
                    for event in snapshot:
                        for perf_message in event.perf_messages:
                            # Append current values of recorded vars
                            # to the emitted message.
                            perf_message[self.perf_key]['recorded_vars'] = \
                                self.algo.recorded_vars
                            yield perf_message
                        del event['perf_messages']

                        self.update_universe(event)

                    # Send the current state of the universe
                    # to the user's algo.
                    self.simulate_snapshot(date)

            perf_messages, risk_message = \
                self.perf_tracker.handle_simulation_end()

            if self.perf_tracker.emission_rate == 'daily':
                for message in perf_messages:
                    message[self.perf_key]['recorded_vars'] = \
                        self.algo.recorded_vars
                    yield message

            # When emitting minutely, it is still useful to have a final
            # packet with the entire day's performance rolled up.
            if self.perf_tracker.emission_rate == 'minute':
                daily_rollup = self.perf_tracker.to_dict(
                    emission_type='daily'
                )
                daily_rollup['daily_perf']['recorded_vars'] = \
                    self.algo.recorded_vars
                yield daily_rollup

            yield risk_message

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our portfolio.
        self.algo.set_portfolio(event.portfolio)
        # The portfolio is modified by each event passed into the
        # performance tracker (prices and amounts can change).
        # The performance tracker sends back an up-to-date portfolio
        # with each event.  However, we provide the portfolio to
        # the algorithm via a setter method, rather than as part
        # of the event data sent to handle_data.  To avoid
        # confusion, we remove it from the event here.
        del event.portfolio
        # Update our knowledge of this event's sid
        sid_data = self.universe[event.sid]
        sid_data.__dict__.update(event.__dict__)

    def simulate_snapshot(self, date):
        """
        Run the user's algo against our current snapshot and update
        the algo's simulated time.
        """
        # Needs to be set so that we inject the proper date into algo
        # log/print lines.
        self.snapshot_dt = date
        self.algo.set_datetime(self.snapshot_dt)
        self.algo.handle_data(self.universe)
        # Update the simulation time.
        self.simulation_dt = date
def __init__(self, algo, sim_params):

    # ==============
    # Simulation
    # Param Setup
    # ==============
    self.sim_params = sim_params

    # ==============
    # Algo Setup
    # ==============
    self.algo = algo
    self.algo_start = normalize_date(self.sim_params.first_open)
    self.env = algo.trading_environment

    # ==============
    # Snapshot Setup
    # ==============

    _day = timedelta(days=1)

    def _get_removal_date(sid,
                          finder=self.env.asset_finder,
                          default=self.sim_params.last_close + _day):
        """
        Get the date of the morning on which we should remove an asset from
        data.

        If we don't have an auto_close_date, this is just the end of the
        simulation.

        If we have an auto_close_date, then we remove assets from data on

        max(asset.auto_close_date, asset.end_date + timedelta(days=1))

        We hold assets at least until auto_close_date because up until that
        date the user might still hold positions or have open orders in an
        expired asset.

        We hold assets at least until end_date + 1, because an asset
        continues trading until the **end** of its end_date.  Even if an
        asset auto-closed before the end_date (say, because Interactive
        Brokers clears futures positions prior to the actual notice or
        expiration), there may still be trades arriving that represent
        signals for other assets that are still tradeable.  (Particularly in
        the futures case, trading in the final days of a contract is likely
        relevant for trading the next contract on the same future chain.)
        """
        try:
            asset = finder.retrieve_asset(sid)
        except ValueError:
            # Handle sid not an int, such as from a custom source.
            # So that they don't compare equal to other sids, and we'd
            # blow up comparing strings to ints, let's give them unique
            # close dates.
            return default + timedelta(microseconds=id(sid))
        except SidsNotFound:
            return default

        auto_close_date = asset.auto_close_date
        if auto_close_date is None:
            # If we don't have an auto_close_date, we never remove an asset
            # from the user's portfolio.
            return default

        end_date = asset.end_date
        if end_date is None:
            # If we have an auto_close_date but not an end_date, clear the
            # asset from data when we clear positions/orders.
            return auto_close_date

        # If we have both, close once we're on or after the
        # auto_close_date, and strictly after the end_date.
        # See docstring above for an explanation of this logic.
        return max(auto_close_date, end_date + _day)

    self._get_removal_date = _get_removal_date

    # The algorithm's data as of our most recent event.
    # Maintain sids in order by asset close date, so that we can more
    # efficiently remove them when their times come...
    self.current_data = BarData(SortedDict(self._get_removal_date))

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.simulation_dt
    self.processor = Processor(inject_algo_dt)
class AlgorithmSimulator(object):

    def __init__(self, order_book, perf_tracker, algo, algo_start):

        # ==========
        # Algo Setup
        # ==========

        # We extract the order book from the txn client so that
        # the algo can place new orders.
        self.order_book = order_book
        self.perf_tracker = perf_tracker

        self.algo = algo
        self.algo_start = algo_start.replace(hour=0, minute=0,
                                             second=0, microsecond=0)

        # Monkey patch the user algorithm to place orders in the
        # TransactionSimulator's order book and use our logger.
        self.algo.set_order(self.order)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's universe as of our most recent event.
        # We want an ndict that will have empty objects as default
        # values on missing keys.
        self.universe = ndict(internal=defaultdict(SIDData))

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None
        self.snapshot_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.snapshot_dt
        self.processor = Processor(inject_algo_dt)

    def order(self, sid, amount):
        """
        Closure to pass into the user's algo to allow placing orders
        into the transaction simulator's dict of open orders.
        """
        order = Order({
            'dt': self.simulation_dt,
            'sid': sid,
            'amount': int(amount),
            'filled': 0
        })

        # Tell the user if they try to buy 0 shares of something.
        if order.amount == 0:
            zero_message = "Requested to trade zero shares of {sid}".format(
                sid=order.sid
            )
            log.debug(zero_message)
            # Don't bother placing orders for 0 shares.
            return

        # Add non-zero orders to the order book.
        # !!!IMPORTANT SIDE-EFFECT!!!
        # This modifies the internal state of the transaction
        # simulator so that it can fill the placed order when it
        # receives its next message.
        self.order_book.place_order(order)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Inject the current algo snapshot time into any log record
        # generated.
        with self.processor.threadbound():

            for date, snapshot in stream_in:
                # Set the simulation date to be the first event we see.
                # This should only occur once, at the start of the test.
                if self.simulation_dt is None:
                    self.simulation_dt = date

                # We're still in the warmup period.  Use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        del event['perf_messages']
                        self.update_universe(event)

                # Regular snapshot.  Update the universe and send a snapshot
                # to handle_data.
                else:
                    for event in snapshot:
                        for perf_message in event.perf_messages:
                            yield perf_message
                        del event['perf_messages']
                        self.update_universe(event)

                    # Send the current state of the universe
                    # to the user's algo.
                    self.simulate_snapshot(date)

            perf_messages, risk_message = \
                self.perf_tracker.handle_simulation_end()

            for message in perf_messages:
                yield message

            yield risk_message

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our portfolio.
        self.algo.set_portfolio(event.portfolio)
        # Update our knowledge of this event's sid
        sid_data = self.universe[event.sid]
        sid_data.__dict__.update(event.__dict__)

    def simulate_snapshot(self, date):
        """
        Run the user's algo against our current snapshot and update
        the algo's simulated time.
        """
        # Needs to be set so that we inject the proper date into algo
        # log/print lines.
        self.snapshot_dt = date
        self.algo.set_datetime(self.snapshot_dt)
        self.algo.handle_data(self.universe)
        # Update the simulation time.
        self.simulation_dt = date
class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {
        "minute": "minute_perf",
        "daily": "daily_perf"
    }

    def get_hash(self):
        """
        There should only ever be one TSC in the system, so
        we don't bother passing args into the hash.
        """
        return self.__class__.__name__ + hash_args()

    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = self.sim_params.first_open
        self.algo_start = self.algo_start.replace(hour=0, minute=0,
                                                  second=0, microsecond=0)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if "algo_dt" not in record.extra:
                record.extra["algo_dt"] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)

    @property
    def perf_key(self):
        return self.EMISSION_TO_PERF_KEY_MAP[
            self.algo.perf_tracker.emission_rate]

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Initialize the mkt_close
        mkt_close = self.algo.perf_tracker.market_close

        # Inject the current algo snapshot time into any log record
        # generated.
        with self.processor.threadbound():
            updated = False
            bm_updated = False
            for date, snapshot in stream_in:
                self.algo.set_datetime(date)
                self.simulation_dt = date
                self.algo.perf_tracker.set_date(date)
                self.algo.blotter.set_date(date)

                # If we're still in the warmup period, use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        if event.type in (DATASOURCE_TYPE.TRADE,
                                          DATASOURCE_TYPE.CUSTOM):
                            self.update_universe(event)
                        self.algo.perf_tracker.process_event(event)
                else:
                    for event in snapshot:
                        if event.type in (DATASOURCE_TYPE.TRADE,
                                          DATASOURCE_TYPE.CUSTOM):
                            self.update_universe(event)
                            updated = True
                        if event.type == DATASOURCE_TYPE.BENCHMARK:
                            self.algo.set_datetime(event.dt)
                            bm_updated = True
                        txns, orders = self.algo.blotter.process_trade(event)
                        for data in chain(txns, orders, [event]):
                            self.algo.perf_tracker.process_event(data)

                    # Update our portfolio.
                    self.algo.set_portfolio(
                        self.algo.perf_tracker.get_portfolio())

                    # Send the current state of the universe
                    # to the user's algo.
                    if updated:
                        self.algo.handle_data(self.current_data)
                        updated = False

                        # Run orders placed in the algorithm call above
                        # through the perf tracker before emitting the perf
                        # packet, so that the perf includes placed orders.
                        for order in self.algo.blotter.new_orders:
                            self.algo.perf_tracker.process_event(order)
                        self.algo.blotter.new_orders = []

                    # The benchmark is our internal clock.  When it
                    # updates, we need to emit a performance message.
                    if bm_updated:
                        bm_updated = False
                        yield self.get_message(date)

                    # When emitting minutely, we re-iterate the day as a
                    # packet with the entire day's performance rolled up.
                    if self.algo.perf_tracker.emission_rate == "minute":
                        if date == mkt_close:
                            daily_rollup = self.algo.perf_tracker.to_dict(
                                emission_type="daily")
                            daily_rollup["daily_perf"]["recorded_vars"] = \
                                self.algo.recorded_vars
                            yield daily_rollup
                            tp = self.algo.perf_tracker.todays_performance
                            tp.rollover()
                            if mkt_close < self.algo.perf_tracker.last_close:
                                mkt_close = self.get_next_close(mkt_close)
                                self.algo.perf_tracker.handle_intraday_close()

            risk_message = self.algo.perf_tracker.handle_simulation_end()
            yield risk_message

    def get_message(self, date):
        rvars = self.algo.recorded_vars
        if self.algo.perf_tracker.emission_rate == "daily":
            perf_message = self.algo.perf_tracker.handle_market_close()
            perf_message["daily_perf"]["recorded_vars"] = rvars
            return perf_message
        elif self.algo.perf_tracker.emission_rate == "minute":
            self.algo.perf_tracker.handle_minute_close(date)
            perf_message = self.algo.perf_tracker.to_dict()
            perf_message["minute_perf"]["recorded_vars"] = rvars
            return perf_message

    def get_next_close(self, mkt_close):
        if mkt_close >= trading.environment.last_trading_day:
            return self.sim_params.last_close
        else:
            return trading.environment.next_open_and_close(mkt_close)[1]

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our knowledge of this event's sid
        sid_data = self.current_data[event.sid]
        sid_data.__dict__.update(event.__dict__)
class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {
        'minute': 'minute_perf',
        'daily': 'daily_perf'
    }

    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = normalize_date(self.sim_params.first_open)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Initialize the mkt_close
        mkt_open = self.algo.perf_tracker.market_open
        mkt_close = self.algo.perf_tracker.market_close

        # Inject the current algo snapshot time into any log record
        # generated.
        with ExitStack() as stack:
            stack.enter_context(self.processor.threadbound())
            stack.enter_context(ZiplineAPI(self.algo))

            data_frequency = self.sim_params.data_frequency

            self._call_before_trading_start(mkt_open)

            for date, snapshot in stream_in:

                self.simulation_dt = date
                self.on_dt_changed(date)

                # If we're still in the warmup period, use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        if event.type == DATASOURCE_TYPE.SPLIT:
                            self.algo.blotter.process_split(event)
                        elif event.type == DATASOURCE_TYPE.TRADE:
                            self.update_universe(event)
                            self.algo.perf_tracker.process_trade(event)
                        elif event.type == DATASOURCE_TYPE.CUSTOM:
                            self.update_universe(event)

                else:
                    messages = self._process_snapshot(
                        date,
                        snapshot,
                        self.algo.instant_fill,
                    )
                    # Perf messages are only emitted if the snapshot contained
                    # a benchmark event.
                    for message in messages:
                        yield message

                    # When emitting minutely, we need to call
                    # before_trading_start before the next trading day begins
                    if date == mkt_close:
                        if mkt_close <= self.algo.perf_tracker.last_close:
                            before_last_close = \
                                mkt_close < self.algo.perf_tracker.last_close
                            try:
                                mkt_open, mkt_close = \
                                    trading.environment \
                                           .next_open_and_close(mkt_close)
                            except trading.NoFurtherDataError:
                                # If at the end of backtest history,
                                # skip advancing market close.
                                pass

                            if before_last_close:
                                self._call_before_trading_start(mkt_open)

                    elif data_frequency == 'daily':
                        next_day = trading.environment.next_trading_day(date)

                        if next_day is not None and \
                           next_day < self.algo.perf_tracker.last_close:
                            self._call_before_trading_start(next_day)

                    self.algo.portfolio_needs_update = True
                    self.algo.account_needs_update = True
                    self.algo.performance_needs_update = True

            risk_message = self.algo.perf_tracker.handle_simulation_end()
            yield risk_message

    def _process_snapshot(self, dt, snapshot, instant_fill):
        """
        Process a stream of events corresponding to a single datetime,
        possibly returning a perf message to be yielded.

        If @instant_fill = True, we delay processing of events until after
        the user's call to handle_data, and we process the user's placed
        orders before the snapshot's events.  Note that this introduces a
        lookahead bias, since the user is effectively placing orders that
        are filled based on trades that happened prior to the call to
        handle_data.

        If @instant_fill = False, we process Trade events before calling
        handle_data.  This means that orders are filled based on trades
        occurring in the next snapshot.  This is the more conservative
        model, and as such it is the default behavior in TradingAlgorithm.
        """
        # Flags indicating whether we saw any events of type TRADE and type
        # BENCHMARK.  Respectively, these control whether or not handle_data
        # is called for this snapshot and whether we emit a perf message for
        # this snapshot.
        any_trade_occurred = False
        benchmark_event_occurred = False

        if instant_fill:
            events_to_be_processed = []

        # Assign process events to variables to avoid attribute access in
        # innermost loops.
        #
        # Done here, to allow for perf_tracker or blotter to be swapped out
        # or changed in between snapshots.
        perf_process_trade = self.algo.perf_tracker.process_trade
        perf_process_transaction = self.algo.perf_tracker.process_transaction
        perf_process_order = self.algo.perf_tracker.process_order
        perf_process_benchmark = self.algo.perf_tracker.process_benchmark
        perf_process_split = self.algo.perf_tracker.process_split
        perf_process_dividend = self.algo.perf_tracker.process_dividend
        perf_process_commission = self.algo.perf_tracker.process_commission
        perf_process_close_position = \
            self.algo.perf_tracker.process_close_position
        blotter_process_trade = self.algo.blotter.process_trade
        blotter_process_benchmark = self.algo.blotter.process_benchmark

        # Containers for the snapshotted events, so that the events are
        # processed in a predictable order, without relying on the sorted
        # order of the individual sources.

        # There is only one benchmark per snapshot; this will be set to the
        # current benchmark iff it occurs.
        benchmark = None
        # trades and customs are initialized as a list since process_snapshot
        # is most often called on market bars, which could contain trades or
        # custom events.
        trades = []
        customs = []
        closes = []

        # splits and dividends are processed once a day.
        #
        # The avoidance of creating the list every time this is called is
        # more to attempt to show that this is the infrequent case of the
        # method, since the performance benefit from deferring the list
        # allocation is marginal.  The splits list will be allocated when a
        # split occurs in the snapshot.
        splits = None
        # The dividends list will be allocated when a dividend occurs in the
        # snapshot.
        dividends = None

        for event in snapshot:
            if event.type == DATASOURCE_TYPE.TRADE:
                trades.append(event)
            elif event.type == DATASOURCE_TYPE.BENCHMARK:
                benchmark = event
            elif event.type == DATASOURCE_TYPE.SPLIT:
                if splits is None:
                    splits = []
                splits.append(event)
            elif event.type == DATASOURCE_TYPE.CUSTOM:
                customs.append(event)
            elif event.type == DATASOURCE_TYPE.DIVIDEND:
                if dividends is None:
                    dividends = []
                dividends.append(event)
            elif event.type == DATASOURCE_TYPE.CLOSE_POSITION:
                closes.append(event)
            else:
                log.warn("Unrecognized event={0}".format(event))

        # Handle benchmark first.
        #
        # Internal broker implementation depends on the benchmark being
        # processed first so that transactions and commissions reported from
        # the broker can be injected.
        if benchmark is not None:
            benchmark_event_occurred = True
            perf_process_benchmark(benchmark)
            for txn, order in blotter_process_benchmark(benchmark):
                if txn.type == DATASOURCE_TYPE.TRANSACTION:
                    perf_process_transaction(txn)
                elif txn.type == DATASOURCE_TYPE.COMMISSION:
                    perf_process_commission(txn)
                perf_process_order(order)

        for trade in trades:
            self.update_universe(trade)
            any_trade_occurred = True
            if instant_fill:
                events_to_be_processed.append(trade)
            else:
                for txn, order in blotter_process_trade(trade):
                    if txn.type == DATASOURCE_TYPE.TRANSACTION:
                        perf_process_transaction(txn)
                    elif txn.type == DATASOURCE_TYPE.COMMISSION:
                        perf_process_commission(txn)
                    perf_process_order(order)
                perf_process_trade(trade)

        for custom in customs:
            self.update_universe(custom)

        for close in closes:
            self.update_universe(close)
            perf_process_close_position(close)

        if splits is not None:
            for split in splits:
                # process_split is not assigned to a variable since it is
                # called rarely compared to the other event processors.
                self.algo.blotter.process_split(split)
                perf_process_split(split)

        if dividends is not None:
            for dividend in dividends:
                perf_process_dividend(dividend)

        if any_trade_occurred:
            new_orders = self._call_handle_data()
            for order in new_orders:
                perf_process_order(order)

        if instant_fill:
            # Now that handle_data has been called and orders have been
            # placed, process the event stream to fill user orders based on
            # the events from this snapshot.
            for trade in events_to_be_processed:
                for txn, order in blotter_process_trade(trade):
                    if txn is not None:
                        perf_process_transaction(txn)
                    if order is not None:
                        perf_process_order(order)
                perf_process_trade(trade)

        if benchmark_event_occurred:
            return self.generate_messages(dt)
        else:
            return ()

    def _call_handle_data(self):
        """
        Call the user's handle_data, returning any orders placed by the algo
        during the call.
        """
        self.algo.event_manager.handle_data(
            self.algo,
            self.current_data,
            self.simulation_dt,
        )
        orders = self.algo.blotter.new_orders
        self.algo.blotter.new_orders = []
        return orders

    def _call_before_trading_start(self, dt):
        dt = normalize_date(dt)
        self.simulation_dt = dt
        self.on_dt_changed(dt)
        self.algo.before_trading_start()

    def on_dt_changed(self, dt):
        if self.algo.datetime != dt:
            self.algo.on_dt_changed(dt)

    def generate_messages(self, dt):
        """
        Generator that yields perf messages for the given datetime.
        """
        # Ensure that updated_portfolio has been called at least once for
        # this dt before we emit a perf message.  This is a no-op if
        # updated_portfolio has already been called this dt.
        self.algo.updated_portfolio()
        self.algo.updated_account()

        rvars = self.algo.recorded_vars
        if self.algo.perf_tracker.emission_rate == 'daily':
            perf_message = \
                self.algo.perf_tracker.handle_market_close_daily()
            perf_message['daily_perf']['recorded_vars'] = rvars
            yield perf_message

        elif self.algo.perf_tracker.emission_rate == 'minute':
            # Close the minute in the tracker, and collect the daily message
            # if the minute is the close of the trading day.
            minute_message, daily_message = \
                self.algo.perf_tracker.handle_minute_close(dt)

            # Collect and yield the minute's perf message.
            minute_message['minute_perf']['recorded_vars'] = rvars
            yield minute_message

            # If there was a daily perf message, collect and yield it.
            if daily_message:
                daily_message['daily_perf']['recorded_vars'] = rvars
                yield daily_message

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our knowledge of this event's sid.
        # Rather than use `if event.sid in ...`, just trying and handling
        # the exception is significantly faster.
        try:
            sid_data = self.current_data[event.sid]
        except KeyError:
            sid_data = self.current_data[event.sid] = SIDData(event.sid)
        sid_data.__dict__.update(event.__dict__)
""" import os import sys from logbook import Processor, StreamHandler, DEBUG, Logger, FileHandler my_handler = FileHandler("test.log", encoding="utf-8", level=DEBUG) # my_handler = StreamHandler(sys.stdout, level=DEBUG) def log_other_info(record): """ a) 通过 with.processor可以让在其中的日志拥有共同的逻辑,相当于一个切面注入 比如这里的例子是 在每条日志中记录一些额外的信息(额外的信息是通过在日志对象(logRecord)的extra(字典对象)属性中添加 一些其他的信息),这样每条日志都会有这里添加的额外的信息。 b) 有个疑问就是,这些额外的信息怎么运用呢,比如这些信息如何能和日志一块记录在文件中呢 c) 关于日志的属性,见 logrecord.py """ record.extra['myname'] = 'kute' record.extra['mycwd'] = os.getcwd() # update myname propertiy record.extra.update(myname="lisa") print(record.to_dict()) if __name__ == "__main__": with my_handler.applicationbound(): with Processor(log_other_info).applicationbound(): mylog = Logger("processor") mylog.notice("notice msg.")
def __init__(self, request):
    Processor.__init__(self, make_request_info_injector(request))
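# make_request_info_injector is not shown in the snippet above.  A plausible
# sketch of such a factory (the request attribute names are assumptions for
# illustration, not from the source):
def make_request_info_injector(request):
    def inject_request_info(record):
        # Copy request metadata onto every record emitted while this
        # processor is bound.
        record.extra['remote_addr'] = getattr(request, 'remote_addr', None)
        record.extra['method'] = getattr(request, 'method', None)
        record.extra['path'] = getattr(request, 'path', None)
    return inject_request_info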
from logbook import warn, StreamHandler
import sys
from termcc.cc import cc

my_handler = StreamHandler(sys.stdout)
my_handler.push_application()
warn(cc(':red: :yin_yang: This is a warning :reset:'))

import os
from logbook import Processor


def inject_cwd(record):
    record.extra['cwd'] = os.getcwd()


with my_handler.applicationbound():
    with Processor(inject_cwd).applicationbound():
        warn(cc(':blue: :yin_yang: This is a warning'))
def __init__(self, algo, sim_params):

    # ==============
    # Simulation
    # Param Setup
    # ==============
    self.sim_params = sim_params

    # ==============
    # Algo Setup
    # ==============
    self.algo = algo
    self.algo_start = normalize_date(self.sim_params.first_open)
    self.env = algo.trading_environment

    # ==============
    # Snapshot Setup
    # ==============

    def _get_asset_close_date(sid,
                              finder=self.env.asset_finder,
                              default=self.sim_params.last_close +
                              timedelta(days=1)):
        try:
            asset = finder.retrieve_asset(sid)
        except ValueError:
            # Handle sid not an int, such as from a custom source.
            # So that they don't compare equal to other sids, and we'd
            # blow up comparing strings to ints, let's give them unique
            # close dates.
            return default + timedelta(microseconds=id(sid))
        except SidsNotFound:
            return default

        # Default is used when the asset has no auto close date,
        # and is set to a time after the simulation ends, so that the
        # relevant asset isn't removed from the universe at all
        # (at least not for this reason).
        return asset.auto_close_date or default

    self._get_asset_close = _get_asset_close_date

    # The algorithm's data as of our most recent event.
    # Maintain sids in order by asset close date, so that we can more
    # efficiently remove them when their times come...
    self.current_data = BarData(SortedDict(self._get_asset_close))

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.simulation_dt
    self.processor = Processor(inject_algo_dt)
def __init__(self, algo, sim_params):

    # ==============
    # Simulation
    # Param Setup
    # ==============
    self.sim_params = sim_params

    # ==============
    # Algo Setup
    # ==============
    self.algo = algo
    self.algo_start = normalize_date(self.sim_params.first_open)
    self.env = algo.trading_environment

    # ==============
    # Snapshot Setup
    # ==============

    _day = timedelta(days=1)

    def _get_removal_date(sid,
                          finder=self.env.asset_finder,
                          default=self.sim_params.last_close + _day):
        """
        Get the date of the morning on which we should remove an asset from
        data.

        If we don't have an auto_close_date, this is just the end of the
        simulation.

        If we have an auto_close_date, then we remove assets from data on

        max(asset.auto_close_date, asset.end_date + timedelta(days=1))

        We hold assets at least until auto_close_date because up until that
        date the user might still hold positions or have open orders in an
        expired asset.

        We hold assets at least until end_date + 1, because an asset
        continues trading until the **end** of its end_date.  Even if an
        asset auto-closed before the end_date (say, because Interactive
        Brokers clears futures positions prior to the actual notice or
        expiration), there may still be trades arriving that represent
        signals for other assets that are still tradeable.  (Particularly in
        the futures case, trading in the final days of a contract is likely
        relevant for trading the next contract on the same future chain.)
        """
        try:
            asset = finder.retrieve_asset(sid)
        except ValueError:
            # Handle sid not an int, such as from a custom source.
            # So that they don't compare equal to other sids, and we'd
            # blow up comparing strings to ints, let's give them unique
            # close dates.
            return default + timedelta(microseconds=id(sid))
        except SidsNotFound:
            return default

        auto_close_date = asset.auto_close_date
        if auto_close_date is None:
            # If we don't have an auto_close_date, we never remove an asset
            # from the user's portfolio.
            return default

        end_date = asset.end_date
        if end_date is None:
            # If we have an auto_close_date but not an end_date, clear the
            # asset from data when we clear positions/orders.
            return auto_close_date

        # If we have both, close once we're on or after the
        # auto_close_date, and strictly after the end_date.
        # See docstring above for an explanation of this logic.
        return max(auto_close_date, end_date + _day)

    self._get_removal_date = _get_removal_date

    # The algorithm's data as of our most recent event.
    # Maintain sids in order by asset close date, so that we can more
    # efficiently remove them when their times come...
    self.current_data = BarData(SortedDict(self._get_removal_date))

    # We don't have a datetime for the current snapshot until we
    # receive a message.
    self.simulation_dt = None

    # =============
    # Logging Setup
    # =============

    # Processor function for injecting the algo_dt into
    # user prints/logs.
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = self.simulation_dt
    self.processor = Processor(inject_algo_dt)
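# The max(auto_close_date, end_date + 1 day) rule in the docstring above can
# be checked with plain datetimes.  A small standalone sketch (the dates are
# made up for illustration):
from datetime import datetime, timedelta

_day = timedelta(days=1)

def removal_date(auto_close_date, end_date, default):
    # Mirrors the branching above for the three possible cases.
    if auto_close_date is None:
        return default
    if end_date is None:
        return auto_close_date
    return max(auto_close_date, end_date + _day)

# Hypothetical contract: trades through June 17, auto-closes June 20.
print(removal_date(datetime(2015, 6, 20), datetime(2015, 6, 17),
                   default=datetime(2016, 1, 1)))
# -> 2015-06-20 00:00:00; the asset is held past end_date + 1 because
#    positions may remain open until the auto-close date.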
def g(*args, **kargs):
    with Processor(partial(inject_func, f.__name__,
                           args, kargs)).threadbound():
        return f(*args, **kargs)
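# The wrapper above only makes sense inside a decorator that closes over `f`
# and an `inject_func` callback, neither of which is shown.  A plausible
# self-contained sketch (the decorator and field names are assumptions for
# illustration):
import sys
from functools import partial, wraps
from logbook import Logger, Processor, StreamHandler

log = Logger('traced')

def inject_func(name, args, kargs, record):
    # Attach the decorated function's name and call arguments to each record.
    record.extra['func'] = name
    record.extra['args'] = repr(args)
    record.extra['kargs'] = repr(kargs)

def log_call_context(f):
    @wraps(f)
    def g(*args, **kargs):
        with Processor(partial(inject_func, f.__name__,
                               args, kargs)).threadbound():
            return f(*args, **kargs)
    return g

@log_call_context
def add(a, b):
    log.info('adding')
    return a + b

with StreamHandler(sys.stdout).applicationbound():
    add(1, 2)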
class AlgorithmSimulator(object):

    def __init__(self, order_book, perf_tracker, algo, algo_start):

        # ==========
        # Algo Setup
        # ==========

        # We extract the order book from the txn client so that
        # the algo can place new orders.
        self.order_book = order_book
        self.perf_tracker = perf_tracker

        self.algo = algo
        self.algo_start = algo_start.replace(hour=0, minute=0,
                                             second=0, microsecond=0)

        # Monkey patch the user algorithm to place orders in the
        # TransactionSimulator's order book and use our logger.
        self.algo.set_order(self.order)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's universe as of our most recent event.
        # We want an ndict that will have empty objects as default
        # values on missing keys.
        self.universe = ndict(internal=defaultdict(SIDData))

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None
        self.snapshot_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.snapshot_dt
        self.processor = Processor(inject_algo_dt)

    def order(self, sid, amount, **kwargs):
        # something could be done with amount to further divide
        # between buy by share count OR buy shares up to a dollar amount
        # numeric == share count AND "$dollar.cents" == cost amount
        """
        amount > 0 :: Buy/Cover
        amount < 0 :: Sell/Short
        Market order:    order(sid, amount)
        Limit order:     order(sid, amount, limit=price)
        Stop order:      order(sid, amount, stop=price)
        StopLimit order: order(sid, amount, limit=price, stop=price)
        """
        # just validates amount and passes rest on to TransactionSimulator
        # Tell the user if they try to buy 0 shares of something.
        if amount == 0:
            zero_message = "Requested to trade zero shares of {0}".format(sid)
            log.debug(zero_message)
            # Don't bother placing orders for 0 shares.
            return

        stop_price, limit_price = None, None
        for name, value in kwargs.items():
            if name == "limit":
                limit_price = value
            if name == "stop":
                stop_price = value

        order = Order({
            'dt': self.simulation_dt,
            'sid': sid,
            'amount': int(amount),
            'filled': 0,
            'stop': stop_price,
            'limit': limit_price,
            'orig_stop': stop_price,
            'orig_limit': limit_price
        })

        # Add non-zero orders to the order book.
        # !!!IMPORTANT SIDE-EFFECT!!!
        # This modifies the internal state of the transaction
        # simulator so that it can fill the placed order when it
        # receives its next message.
        err_str = self.order_book.place_order(order)
        if err_str is not None and len(err_str) > 0:
            # error, trade was not placed, log it out
            log.debug(err_str)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Inject the current algo snapshot time into any log record
        # generated.
        with self.processor.threadbound():

            for date, snapshot in stream_in:
                # Set the simulation date to be the first event we see.
                # This should only occur once, at the start of the test.
                if self.simulation_dt is None:
                    self.simulation_dt = date

                # We're still in the warmup period.  Use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        del event['perf_messages']
                        self.update_universe(event)

                # Regular snapshot.  Update the universe and send a snapshot
                # to handle_data.
                else:
                    for event in snapshot:
                        for perf_message in event.perf_messages:
                            yield perf_message
                        del event['perf_messages']
                        self.update_universe(event)

                    # Send the current state of the universe
                    # to the user's algo.
                    self.simulate_snapshot(date)

            perf_messages, risk_message = \
                self.perf_tracker.handle_simulation_end()

            for message in perf_messages:
                yield message

            yield risk_message

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our portfolio.
        self.algo.set_portfolio(event.portfolio)
        # Update our knowledge of this event's sid
        sid_data = self.universe[event.sid]
        sid_data.__dict__.update(event.__dict__)

    def simulate_snapshot(self, date):
        """
        Run the user's algo against our current snapshot and update
        the algo's simulated time.
        """
        # Needs to be set so that we inject the proper date into algo
        # log/print lines.
        self.snapshot_dt = date
        self.algo.set_datetime(self.snapshot_dt)
        self.algo.handle_data(self.universe)
        # Update the simulation time.
        self.simulation_dt = date
class AlgorithmSimulator(object): EMISSION_TO_PERF_KEY_MAP = { 'minute': 'intraday_perf', 'daily': 'daily_perf' } def get_hash(self): """ There should only ever be one TSC in the system, so we don't bother passing args into the hash. """ return self.__class__.__name__ + hash_args() def __init__(self, algo, sim_params): # ============== # Simulation # Param Setup # ============== self.sim_params = sim_params # ============== # Perf Tracker # Setup # ============== self.perf_tracker = PerformanceTracker(self.sim_params) self.perf_key = self.EMISSION_TO_PERF_KEY_MAP[ self.perf_tracker.emission_rate] # ============== # Algo Setup # ============== self.algo = algo self.algo_start = self.sim_params.first_open self.algo_start = self.algo_start.replace(hour=0, minute=0, second=0, microsecond=0) # ============== # Snapshot Setup # ============== # The algorithm's data as of our most recent event. # We want an object that will have empty objects as default # values on missing keys. self.current_data = BarData() # We don't have a datetime for the current snapshot until we # receive a message. self.simulation_dt = None self.snapshot_dt = None # ============= # Logging Setup # ============= # Processor function for injecting the algo_dt into # user prints/logs. def inject_algo_dt(record): if not 'algo_dt' in record.extra: record.extra['algo_dt'] = self.snapshot_dt self.processor = Processor(inject_algo_dt) def transform(self, stream_in): """ Main generator work loop. """ # Set the simulation date to be the first event we see. peek_date, peek_snapshot = next(stream_in) self.simulation_dt = peek_date # Stitch back together the generator by placing the peeked # event back in front stream = itertools.chain([(peek_date, peek_snapshot)], stream_in) # inject the current algo # snapshot time to any log record generated. with self.processor.threadbound(): updated = False bm_updated = False for date, snapshot in stream: self.perf_tracker.set_date(date) self.algo.blotter.set_date(date) # If we're still in the warmup period. Use the event to # update our universe, but don't yield any perf messages, # and don't send a snapshot to handle_data. if date < self.algo_start: for event in snapshot: if event.type in (DATASOURCE_TYPE.TRADE, DATASOURCE_TYPE.CUSTOM): self.update_universe(event) self.perf_tracker.process_event(event) else: for event in snapshot: if event.type in (DATASOURCE_TYPE.TRADE, DATASOURCE_TYPE.CUSTOM): self.update_universe(event) updated = True if event.type == DATASOURCE_TYPE.BENCHMARK: bm_updated = True txns, orders = self.algo.blotter.process_trade(event) for data in chain([event], txns, orders): self.perf_tracker.process_event(data) # Update our portfolio. self.algo.set_portfolio(self.perf_tracker.get_portfolio()) # Send the current state of the universe # to the user's algo. if updated: self.simulate_snapshot(date) updated = False # run orders placed in the algorithm call # above through perf tracker before emitting # the perf packet, so that the perf includes # placed orders for order in self.algo.blotter.new_orders: self.perf_tracker.process_event(order) self.algo.blotter.new_orders = [] # The benchmark is our internal clock. When it # updates, we need to emit a performance message. if bm_updated: bm_updated = False yield self.get_message(date) risk_message = self.perf_tracker.handle_simulation_end() # When emitting minutely, it is still useful to have a final # packet with the entire days performance rolled up. 
if self.perf_tracker.emission_rate == 'minute': daily_rollup = self.perf_tracker.to_dict( emission_type='daily' ) daily_rollup['daily_perf']['recorded_vars'] = \ self.algo.recorded_vars yield daily_rollup yield risk_message def get_message(self, date): rvars = self.algo.recorded_vars if self.perf_tracker.emission_rate == 'daily': perf_message = \ self.perf_tracker.handle_market_close() perf_message['daily_perf']['recorded_vars'] = rvars return perf_message elif self.perf_tracker.emission_rate == 'minute': self.perf_tracker.handle_minute_close(date) perf_message = self.perf_tracker.to_dict() perf_message['intraday_perf']['recorded_vars'] = rvars return perf_message def update_universe(self, event): """ Update the universe with new event information. """ # Update our knowledge of this event's sid sid_data = self.current_data[event.sid] sid_data.__dict__.update(event.__dict__) def simulate_snapshot(self, date): """ Run the user's algo against our current snapshot and update the algo's simulated time. """ # Needs to be set so that we inject the proper date into algo # log/print lines. self.snapshot_dt = date self.algo.set_datetime(self.snapshot_dt) # Update the simulation time. self.simulation_dt = date self.algo.handle_data(self.current_data)
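# A small illustration (stand-in data, not zipline) of the emission-rate
# keying used by get_message above: the tracker's emission rate selects which
# sub-dict of the perf packet receives the algorithm's recorded_vars.
EMISSION_TO_PERF_KEY_MAP = {'minute': 'intraday_perf', 'daily': 'daily_perf'}


def attach_recorded_vars(packet, emission_rate, rvars):
    packet[EMISSION_TO_PERF_KEY_MAP[emission_rate]]['recorded_vars'] = rvars
    return packet


print(attach_recorded_vars({'daily_perf': {}}, 'daily', {'leverage': 1.2}))
# {'daily_perf': {'recorded_vars': {'leverage': 1.2}}}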
        ' : ',
        '{record.message}',
    ]),
}


def inject_extra(record):
    record.extra['basename'] = os.path.basename(record.filename)
    record.extra['level_color'] = get_log_color(record.level)
    record.extra['clear_color'] = color.ENDC


logger = Logger('root')

# processor for injecting extra info into every record
processor = Processor(inject_extra)
processor.push_application()

# for screen log
screen_level = INFO
stream_handler = StreamHandler(sys.stdout, level=screen_level, bubble=True)
stream_handler.format_string = formatter['screen']
stream_handler.push_application()

# for rolling file log
p = os.environ['FBPATH']
if not os.path.isdir(p):
    os.makedirs(p)
file_path = os.path.expanduser(os.path.join(p, 'logs'))
if os.path.isdir(file_path):
    backup_count = 7
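# The snippet above is cut off after backup_count; a hedged guess at how such
# a rolling-file setup typically continues with logbook. The directory,
# filename, and rotation settings here are assumptions, not the original code.
import os
from logbook import TimedRotatingFileHandler

log_dir = os.path.expanduser('~/logs')   # stand-in for file_path above
os.makedirs(log_dir, exist_ok=True)
TimedRotatingFileHandler(
    os.path.join(log_dir, 'app.log'),
    date_format='%Y-%m-%d',   # one file per day
    backup_count=7,           # keep the last N days, as backup_count above
    bubble=True).push_application()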
class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {'minute': 'minute_perf',
                                'daily': 'daily_perf'}

    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = normalize_date(self.sim_params.first_open)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Initialize the mkt_close
        mkt_open = self.algo.perf_tracker.market_open
        mkt_close = self.algo.perf_tracker.market_close

        # inject the current algo
        # snapshot time to any log record generated.
        with self.processor.threadbound():
            data_frequency = self.sim_params.data_frequency

            self._call_before_trading_start(mkt_open)

            for date, snapshot in stream_in:

                self.simulation_dt = date
                self.on_dt_changed(date)

                # If we're still in the warmup period, use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        if event.type == DATASOURCE_TYPE.SPLIT:
                            self.algo.blotter.process_split(event)
                        elif event.type == DATASOURCE_TYPE.TRADE:
                            self.update_universe(event)
                            self.algo.perf_tracker.process_trade(event)
                        elif event.type == DATASOURCE_TYPE.CUSTOM:
                            self.update_universe(event)

                else:
                    message = self._process_snapshot(
                        date,
                        snapshot,
                        self.algo.instant_fill,
                    )
                    # Perf messages are only emitted if the snapshot contained
                    # a benchmark event.
                    if message is not None:
                        yield message

                    # When emitting minutely, we re-iterate the day as a
                    # packet with the entire day's performance rolled up.
                    if date == mkt_close:
                        if self.algo.perf_tracker.emission_rate == 'minute':
                            daily_rollup = self.algo.perf_tracker.to_dict(
                                emission_type='daily')
                            daily_rollup['daily_perf']['recorded_vars'] = \
                                self.algo.recorded_vars
                            yield daily_rollup
                            tp = self.algo.perf_tracker.todays_performance
                            tp.rollover()

                        if mkt_close <= self.algo.perf_tracker.last_close:
                            before_last_close = \
                                mkt_close < self.algo.perf_tracker.last_close
                            try:
                                mkt_open, mkt_close = \
                                    trading.environment \
                                           .next_open_and_close(mkt_close)

                            except trading.NoFurtherDataError:
                                # If at the end of backtest history,
                                # skip advancing market close.
                                pass

                            if self.algo.perf_tracker.emission_rate == \
                                    'minute':
                                self.algo.perf_tracker\
                                    .handle_intraday_market_close(
                                        mkt_open,
                                        mkt_close)

                                if before_last_close:
                                    self._call_before_trading_start(mkt_open)

                    elif data_frequency == 'daily':
                        next_day = trading.environment.next_trading_day(date)

                        if next_day is not None and \
                           next_day < self.algo.perf_tracker.last_close:
                            self._call_before_trading_start(next_day)

                    self.algo.portfolio_needs_update = True
                    self.algo.account_needs_update = True
                    self.algo.performance_needs_update = True

            risk_message = self.algo.perf_tracker.handle_simulation_end()
            yield risk_message

    def _process_snapshot(self, dt, snapshot, instant_fill):
        """
        Process a stream of events corresponding to a single datetime,
        possibly returning a perf message to be yielded.

        If @instant_fill = True, we delay processing of events until after
        the user's call to handle_data, and we process the user's placed
        orders before the snapshot's events.  Note that this introduces a
        lookahead bias, since the user is effectively placing orders that are
        filled based on trades that happened prior to the call to
        handle_data.

        If @instant_fill = False, we process Trade events before calling
        handle_data.  This means that orders are filled based on trades
        occurring in the next snapshot.  This is the more conservative model,
        and as such it is the default behavior in TradingAlgorithm.
        """

        # Flags indicating whether we saw any events of type TRADE and type
        # BENCHMARK.  Respectively, these control whether or not handle_data
        # is called for this snapshot and whether we emit a perf message for
        # this snapshot.
        any_trade_occurred = False
        benchmark_event_occurred = False

        if instant_fill:
            events_to_be_processed = []

        # Assign process events to variables to avoid attribute access in
        # innermost loops.
        #
        # Done here, to allow for perf_tracker or blotter to be swapped out
        # or changed in between snapshots.
        perf_process_trade = self.algo.perf_tracker.process_trade
        perf_process_transaction = self.algo.perf_tracker.process_transaction
        perf_process_order = self.algo.perf_tracker.process_order
        perf_process_benchmark = self.algo.perf_tracker.process_benchmark
        perf_process_split = self.algo.perf_tracker.process_split
        perf_process_dividend = self.algo.perf_tracker.process_dividend
        perf_process_commission = self.algo.perf_tracker.process_commission
        blotter_process_trade = self.algo.blotter.process_trade
        blotter_process_benchmark = self.algo.blotter.process_benchmark

        # Containers for the snapshotted events, so that the events are
        # processed in a predictable order, without relying on the sorted
        # order of the individual sources.

        # There is only one benchmark per snapshot; it will be set to the
        # current benchmark iff one occurs.
        benchmark = None

        # trades and customs are initialized as lists since process_snapshot
        # is most often called on market bars, which could contain trades or
        # custom events.
        trades = []
        customs = []

        # splits and dividends are processed once a day.
        #
        # Deferring creation of these lists is less about the (marginal)
        # performance benefit and more to signal that splits and dividends
        # are the infrequent case for this method.  The splits list will be
        # allocated when a split occurs in the snapshot.
        splits = None

        # The dividends list will be allocated when a dividend occurs in the
        # snapshot.
        dividends = None

        for event in snapshot:
            if event.type == DATASOURCE_TYPE.TRADE:
                trades.append(event)
            elif event.type == DATASOURCE_TYPE.BENCHMARK:
                benchmark = event
            elif event.type == DATASOURCE_TYPE.SPLIT:
                if splits is None:
                    splits = []
                splits.append(event)
            elif event.type == DATASOURCE_TYPE.CUSTOM:
                customs.append(event)
            elif event.type == DATASOURCE_TYPE.DIVIDEND:
                if dividends is None:
                    dividends = []
                dividends.append(event)
            else:
                log.warn("Unrecognized event={0}".format(event))

        # Handle benchmark first.
        #
        # Internal broker implementation depends on the benchmark being
        # processed first so that transactions and commissions reported from
        # the broker can be injected.
        if benchmark is not None:
            benchmark_event_occurred = True
            perf_process_benchmark(benchmark)
            for txn, order in blotter_process_benchmark(benchmark):
                if txn.type == DATASOURCE_TYPE.TRANSACTION:
                    perf_process_transaction(txn)
                elif txn.type == DATASOURCE_TYPE.COMMISSION:
                    perf_process_commission(txn)
                perf_process_order(order)

        for trade in trades:
            self.update_universe(trade)
            any_trade_occurred = True
            if instant_fill:
                events_to_be_processed.append(trade)
            else:
                for txn, order in blotter_process_trade(trade):
                    if txn.type == DATASOURCE_TYPE.TRANSACTION:
                        perf_process_transaction(txn)
                    elif txn.type == DATASOURCE_TYPE.COMMISSION:
                        perf_process_commission(txn)
                    perf_process_order(order)
                perf_process_trade(trade)

        for custom in customs:
            self.update_universe(custom)

        if splits is not None:
            for split in splits:
                # process_split is not assigned to a variable since it is
                # called rarely compared to the other event processors.
                self.algo.blotter.process_split(split)
                perf_process_split(split)

        if dividends is not None:
            for dividend in dividends:
                perf_process_dividend(dividend)

        if any_trade_occurred:
            new_orders = self._call_handle_data()
            for order in new_orders:
                perf_process_order(order)

        if instant_fill:
            # Now that handle_data has been called and orders have been
            # placed, process the event stream to fill user orders based on
            # the events from this snapshot.
            for trade in events_to_be_processed:
                for txn, order in blotter_process_trade(trade):
                    if txn is not None:
                        perf_process_transaction(txn)
                    if order is not None:
                        perf_process_order(order)
                perf_process_trade(trade)

        if benchmark_event_occurred:
            return self.get_message(dt)
        else:
            return None

    def _call_handle_data(self):
        """
        Call the user's handle_data, returning any orders placed by the algo
        during the call.
        """
        self.algo.event_manager.handle_data(
            self.algo,
            self.current_data,
            self.simulation_dt,
        )
        orders = self.algo.blotter.new_orders
        self.algo.blotter.new_orders = []
        return orders

    def _call_before_trading_start(self, dt):
        dt = normalize_date(dt)
        self.simulation_dt = dt
        self.on_dt_changed(dt)
        self.algo.before_trading_start()

    def on_dt_changed(self, dt):
        if self.algo.datetime != dt:
            self.algo.on_dt_changed(dt)

    def get_message(self, dt):
        """
        Get a perf message for the given datetime.
        """
        # Ensure that updated_portfolio has been called at least once for
        # this dt before we emit a perf message.  This is a no-op if
        # updated_portfolio has already been called this dt.
        self.algo.updated_portfolio()
        self.algo.updated_account()

        rvars = self.algo.recorded_vars
        if self.algo.perf_tracker.emission_rate == 'daily':
            perf_message = \
                self.algo.perf_tracker.handle_market_close_daily()
            perf_message['daily_perf']['recorded_vars'] = rvars
            return perf_message
        elif self.algo.perf_tracker.emission_rate == 'minute':
            self.algo.perf_tracker.handle_minute_close(dt)
            perf_message = self.algo.perf_tracker.to_dict()
            perf_message['minute_perf']['recorded_vars'] = rvars
            return perf_message

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our knowledge of this event's sid.
        # Rather than using `if event.sid in ...`, trying and handling the
        # exception is significantly faster.
        try:
            sid_data = self.current_data[event.sid]
        except KeyError:
            sid_data = self.current_data[event.sid] = SIDData(event.sid)

        sid_data.__dict__.update(event.__dict__)
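# The comment in update_universe above names the EAFP idiom: when lookups hit
# far more often than they miss, try/except KeyError beats an explicit
# membership test because the common path costs a single dict access.
# Standalone sketch with a trivial stand-in for SIDData:
class SIDData(object):
    def __init__(self, sid):
        self.sid = sid


current_data = {}


def sid_data_for(sid):
    try:
        return current_data[sid]                   # fast path: one lookup
    except KeyError:
        # slow path: only taken on the first event for this sid
        entry = current_data[sid] = SIDData(sid)
        return entry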
import sys
from kute.easylog.easylog import geteasylog
from logbook import StreamHandler, DEBUG, Processor, Logger

easylog = geteasylog()
StreamHandler(sys.stdout, level=DEBUG).push_application()


def printrecorddetail(record):
    easylog.info(record.channel)            # name of the logger
    easylog.info(record.dispatcher.name)
    easylog.info(record.exception_message)
    easylog.info(record.exception_name)
    easylog.info(record.extra)
    easylog.info(record.filename)
    easylog.info(record.level)
    easylog.info(record.level_name)
    easylog.info(record.message)
    easylog.info(record.msg)
    easylog.info(record.thread_name)
    easylog.info(record.time)
    easylog.info(record.to_dict(True))      # overview of all log record attributes


if __name__ == "__main__":
    with Processor(printrecorddetail).applicationbound():
        mylog = Logger("log-record-app-name")
        mylog.info("log record detail")
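# record.to_dict(True) above returns a JSON-safe dict of every attribute; a
# hedged sketch of using that for structured logging by subclassing
# logbook.Handler. The class name and output format are our own choices, not
# logbook API.
import json
import sys
from logbook import Handler, Logger


class JSONLinesHandler(Handler):
    def emit(self, record):
        # One JSON object per line, including any Processor-injected extras.
        sys.stdout.write(json.dumps(record.to_dict(json_safe=True),
                                    default=str) + '\n')


with JSONLinesHandler().applicationbound():
    Logger('structured-app').info('hello')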
class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {
        'minute': 'minute_perf',
        'daily': 'daily_perf'
    }

    def get_hash(self):
        """
        There should only ever be one TSC in the system, so
        we don't bother passing args into the hash.
        """
        return self.__class__.__name__ + hash_args()

    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = self.sim_params.first_open
        self.algo_start = self.algo_start.replace(hour=0, minute=0,
                                                  second=0,
                                                  microsecond=0)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)

    @property
    def perf_key(self):
        return self.EMISSION_TO_PERF_KEY_MAP[
            self.algo.perf_tracker.emission_rate]

    def process_event(self, event):
        process_trade = self.algo.blotter.process_trade
        for txn, order in process_trade(event):
            self.algo.perf_tracker.process_event(txn)
            self.algo.perf_tracker.process_event(order)
        self.algo.perf_tracker.process_event(event)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Initialize the mkt_close
        mkt_close = self.algo.perf_tracker.market_close

        # inject the current algo
        # snapshot time to any log record generated.
        with self.processor.threadbound():

            updated = False
            bm_updated = False
            for date, snapshot in stream_in:
                self.algo.set_datetime(date)
                self.simulation_dt = date
                self.algo.perf_tracker.set_date(date)
                self.algo.blotter.set_date(date)
                # If we're still in the warmup period, use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        if event.type == DATASOURCE_TYPE.SPLIT:
                            self.algo.blotter.process_split(event)

                        if event.type in (DATASOURCE_TYPE.TRADE,
                                          DATASOURCE_TYPE.CUSTOM):
                            self.update_universe(event)
                        self.algo.perf_tracker.process_event(event)
                else:
                    if self.algo.instant_fill:
                        events = []

                    for event in snapshot:
                        if event.type == DATASOURCE_TYPE.TRADE:
                            self.update_universe(event)
                            updated = True
                        elif event.type == DATASOURCE_TYPE.BENCHMARK:
                            self.algo.set_datetime(event.dt)
                            bm_updated = True
                        elif event.type == DATASOURCE_TYPE.CUSTOM:
                            self.update_universe(event)
                            updated = True
                        elif event.type == DATASOURCE_TYPE.SPLIT:
                            self.algo.blotter.process_split(event)

                        # If we are instantly filling orders, we process
                        # them after handle_data().
                        if not self.algo.instant_fill:
                            self.process_event(event)
                        else:
                            events.append(event)

                    # Send the current state of the universe
                    # to the user's algo.
                    if updated:
                        self.algo.handle_data(self.current_data)
                        updated = False

                        # run orders placed in the algorithm call
                        # above through perf tracker before emitting
                        # the perf packet, so that the perf includes
                        # placed orders
                        for order in self.algo.blotter.new_orders:
                            self.algo.perf_tracker.process_event(order)
                        self.algo.blotter.new_orders = []

                    # If we are instantly filling, we execute orders
                    # in this iteration rather than the next.
                    if self.algo.instant_fill:
                        for event in events:
                            self.process_event(event)

                    # The benchmark is our internal clock. When it
                    # updates, we need to emit a performance message.
                    if bm_updated:
                        bm_updated = False
                        self.algo.updated_portfolio()
                        yield self.get_message(date)

                    # When emitting minutely, we re-iterate the day as a
                    # packet with the entire day's performance rolled up.
                    if self.algo.perf_tracker.emission_rate == 'minute':
                        if date == mkt_close:
                            daily_rollup = self.algo.perf_tracker.to_dict(
                                emission_type='daily'
                            )
                            daily_rollup['daily_perf']['recorded_vars'] = \
                                self.algo.recorded_vars
                            yield daily_rollup
                            tp = self.algo.perf_tracker.todays_performance
                            tp.rollover()
                            if mkt_close <= self.algo.perf_tracker.last_close:
                                _, mkt_close = \
                                    trading.environment.next_open_and_close(
                                        mkt_close
                                    )
                                self.algo.perf_tracker.handle_intraday_close()

                    self.algo.portfolio_needs_update = True

            risk_message = self.algo.perf_tracker.handle_simulation_end()
            yield risk_message

    def get_message(self, date):
        rvars = self.algo.recorded_vars
        if self.algo.perf_tracker.emission_rate == 'daily':
            perf_message = \
                self.algo.perf_tracker.handle_market_close()
            perf_message['daily_perf']['recorded_vars'] = rvars
            return perf_message
        elif self.algo.perf_tracker.emission_rate == 'minute':
            self.algo.perf_tracker.handle_minute_close(date)
            perf_message = self.algo.perf_tracker.to_dict()
            perf_message['minute_perf']['recorded_vars'] = rvars
            return perf_message

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our knowledge of this event's sid.
        # Rather than using `if event.sid in ...`, trying and handling the
        # exception is significantly faster.
        try:
            sid_data = self.current_data[event.sid]
        except KeyError:
            sid_data = self.current_data[event.sid] = SIDData()

        sid_data.__dict__.update(event.__dict__)
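# Why the loops above drain blotter.new_orders through the perf tracker
# before emitting a packet: orders placed inside handle_data should appear in
# the *same* perf message as the bar that triggered them. Minimal stand-ins,
# not zipline objects:
class Blotter(object):
    def __init__(self):
        self.new_orders = []


blotter, perf_events = Blotter(), []
blotter.new_orders.append({'sid': 1, 'amount': 100})  # placed by handle_data

for order in blotter.new_orders:      # perf_tracker.process_event(order)
    perf_events.append(order)
blotter.new_orders = []               # don't double-count on the next bar

assert perf_events and not blotter.new_orders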
from utils import get_ip


def inject_information(record):
    record.extra['ip'] = get_ip()


log_format = u'[{record.time:%Y-%m-%d %H:%M}] {record.channel} - {record.level_name}: {record.message} \t({record.extra[ip]})'

# a nested handler setup can be used to configure more complex setups
setup = NestedSetup([
    #StderrHandler(format_string=u'[{record.time:%Y-%m-%d %H:%M}] {record.channel} - {record.level_name}: {record.message} \t({record.extra[ip]})'),
    StreamHandler(sys.stdout, format_string=log_format),
    # then write messages that are at least warnings to a logfile
    FileHandler(os.environ['QTRADE_LOG'], level='WARNING'),
    Processor(inject_information)
])

color_setup = NestedSetup([
    StreamHandler(sys.stdout, format_string=log_format),
    ColorizedStderrHandler(format_string=log_format, level='NOTICE'),
    Processor(inject_information)
])

remote_setup = NestedSetup([
    ZeroMQHandler('tcp://127.0.0.1:5540'),
    Processor(inject_information)
])

log = Logger('Trade Labo')
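# A hedged usage sketch for the setups above: a NestedSetup is bound as one
# unit, and the Processor inside it must be active or the
# {record.extra[ip]} placeholder in log_format renders as an empty string.
with setup.applicationbound():
    log.info('portfolio rebalanced')      # stdout, with the ip appended
    log.warn('missing benchmark event')   # stdout and the QTRADE_LOG file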
class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {
        'minute': 'minute_perf',
        'daily': 'daily_perf'
    }

    def get_hash(self):
        """
        There should only ever be one TSC in the system, so
        we don't bother passing args into the hash.
        """
        return self.__class__.__name__ + hash_args()

    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = self.sim_params.first_open
        self.algo_start = self.algo_start.replace(hour=0, minute=0,
                                                  second=0,
                                                  microsecond=0)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)

    @property
    def perf_key(self):
        return self.EMISSION_TO_PERF_KEY_MAP[
            self.algo.perf_tracker.emission_rate]

    def process_event(self, event):
        process_trade = self.algo.blotter.process_trade
        for txn, order in process_trade(event):
            self.algo.perf_tracker.process_event(txn)
            self.algo.perf_tracker.process_event(order)
        self.algo.perf_tracker.process_event(event)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Initialize the mkt_close
        mkt_open = self.algo.perf_tracker.market_open
        mkt_close = self.algo.perf_tracker.market_close

        # inject the current algo
        # snapshot time to any log record generated.
        with self.processor.threadbound():

            for date, snapshot in stream_in:

                self.simulation_dt = date
                self.algo.on_dt_changed(date)

                # If we're still in the warmup period, use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        if event.type == DATASOURCE_TYPE.SPLIT:
                            self.algo.blotter.process_split(event)

                        if event.type in (DATASOURCE_TYPE.TRADE,
                                          DATASOURCE_TYPE.CUSTOM):
                            self.update_universe(event)
                        self.algo.perf_tracker.process_event(event)
                else:
                    message = self._process_snapshot(
                        date,
                        snapshot,
                        self.algo.instant_fill,
                    )
                    # Perf messages are only emitted if the snapshot contained
                    # a benchmark event.
                    if message is not None:
                        yield message

                    # When emitting minutely, we re-iterate the day as a
                    # packet with the entire day's performance rolled up.
                    if self.algo.perf_tracker.emission_rate == 'minute':
                        if date == mkt_close:
                            daily_rollup = self.algo.perf_tracker.to_dict(
                                emission_type='daily'
                            )
                            daily_rollup['daily_perf']['recorded_vars'] = \
                                self.algo.recorded_vars
                            yield daily_rollup
                            tp = self.algo.perf_tracker.todays_performance
                            tp.rollover()
                            if mkt_close <= self.algo.perf_tracker.last_close:
                                try:
                                    mkt_open, mkt_close = \
                                        trading.environment \
                                               .next_open_and_close(mkt_close)
                                except trading.NoFurtherDataError:
                                    # If at the end of backtest history,
                                    # skip advancing market close.
                                    pass

                                self.algo.perf_tracker\
                                    .handle_intraday_market_close(
                                        mkt_open,
                                        mkt_close)

                    self.algo.portfolio_needs_update = True

            risk_message = self.algo.perf_tracker.handle_simulation_end()
            yield risk_message

    def _process_snapshot(self, dt, snapshot, instant_fill):
        """
        Process a stream of events corresponding to a single datetime,
        possibly returning a perf message to be yielded.

        If @instant_fill = True, we delay processing of events until after
        the user's call to handle_data, and we process the user's placed
        orders before the snapshot's events.  Note that this introduces a
        lookahead bias, since the user is effectively placing orders that are
        filled based on trades that happened prior to the call to
        handle_data.

        If @instant_fill = False, we process Trade events before calling
        handle_data.  This means that orders are filled based on trades
        occurring in the next snapshot.  This is the more conservative model,
        and as such it is the default behavior in TradingAlgorithm.
        """

        # Flags indicating whether we saw any events of type TRADE and type
        # BENCHMARK.  Respectively, these control whether or not handle_data
        # is called for this snapshot and whether we emit a perf message for
        # this snapshot.
        any_trade_occurred = False
        benchmark_event_occurred = False

        if instant_fill:
            events_to_be_processed = []

        for event in snapshot:
            if event.type == DATASOURCE_TYPE.TRADE:
                self.update_universe(event)
                any_trade_occurred = True
            elif event.type == DATASOURCE_TYPE.BENCHMARK:
                benchmark_event_occurred = True
            elif event.type == DATASOURCE_TYPE.CUSTOM:
                self.update_universe(event)
            elif event.type == DATASOURCE_TYPE.SPLIT:
                self.algo.blotter.process_split(event)

            if not self.algo.instant_fill:
                self.process_event(event)
            else:
                events_to_be_processed.append(event)

        if any_trade_occurred:
            new_orders = self._call_handle_data()
            for order in new_orders:
                self.algo.perf_tracker.process_event(order)

        if instant_fill:
            # Now that handle_data has been called and orders have been
            # placed, process the event stream to fill user orders based on
            # the events from this snapshot.
            for event in events_to_be_processed:
                self.process_event(event)

        if benchmark_event_occurred:
            self.algo.updated_portfolio()
            return self.get_message(dt)
        else:
            return None

    def _call_handle_data(self):
        """
        Call the user's handle_data, returning any orders placed by the algo
        during the call.
        """
        self.algo.handle_data(self.current_data)
        orders = self.algo.blotter.new_orders
        self.algo.blotter.new_orders = []
        return orders

    def get_message(self, dt):
        """
        Get a perf message for the given datetime.
        """
        rvars = self.algo.recorded_vars
        if self.algo.perf_tracker.emission_rate == 'daily':
            perf_message = \
                self.algo.perf_tracker.handle_market_close_daily()
            perf_message['daily_perf']['recorded_vars'] = rvars
            return perf_message
        elif self.algo.perf_tracker.emission_rate == 'minute':
            self.algo.perf_tracker.handle_minute_close(dt)
            perf_message = self.algo.perf_tracker.to_dict()
            perf_message['minute_perf']['recorded_vars'] = rvars
            return perf_message

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our knowledge of this event's sid.
        # Rather than using `if event.sid in ...`, trying and handling the
        # exception is significantly faster.
        try:
            sid_data = self.current_data[event.sid]
        except KeyError:
            sid_data = self.current_data[event.sid] = SIDData()

        sid_data.__dict__.update(event.__dict__)
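# A toy illustration (no zipline imports) of the fill-timing tradeoff
# described in _process_snapshot's docstring: with instant_fill, an order
# placed during a bar fills at that same bar's trade (lookahead bias);
# otherwise it waits for the next bar's trade, the conservative default.
def simulate(prices, instant_fill):
    # Each fill is (bar the order was placed, bar it filled, fill price).
    fills, pending = [], None
    for bar, price in enumerate(prices):
        if not instant_fill and pending is not None:
            fills.append((pending, bar, price))   # filled on the next bar
            pending = None
        # handle_data runs for this bar and places one order.
        if instant_fill:
            fills.append((bar, bar, price))       # filled on the same bar
        else:
            pending = bar
    return fills


print(simulate([10.0, 11.0, 12.0], instant_fill=True))
# [(0, 0, 10.0), (1, 1, 11.0), (2, 2, 12.0)]
print(simulate([10.0, 11.0, 12.0], instant_fill=False))
# [(0, 1, 11.0), (1, 2, 12.0)]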
class AlgorithmSimulator(object): def __init__(self, order_book, perf_tracker, algo, algo_start): # ========== # Algo Setup # ========== # We extract the order book from the txn client so that # the algo can place new orders. self.order_book = order_book self.perf_tracker = perf_tracker self.algo = algo self.algo_start = algo_start.replace(hour=0, minute=0, second=0, microsecond=0) # Monkey patch the user algorithm to place orders in the # TransactionSimulator's order book and use our logger. self.algo.set_order(self.order) # ============== # Snapshot Setup # ============== # The algorithm's universe as of our most recent event. # We want an ndict that will have empty objects as default # values on missing keys. self.universe = ndict(internal=defaultdict(SIDData)) # We don't have a datetime for the current snapshot until we # receive a message. self.simulation_dt = None self.snapshot_dt = None # ============= # Logging Setup # ============= # Processor function for injecting the algo_dt into # user prints/logs. def inject_algo_dt(record): if not 'algo_dt' in record.extra: record.extra['algo_dt'] = self.snapshot_dt self.processor = Processor(inject_algo_dt) def order(self, sid, amount, limit_price=None, stop_price=None): # something could be done with amount to further divide # between buy by share count OR buy shares up to a dollar amount # numeric == share count AND "$dollar.cents" == cost amount """ amount > 0 :: Buy/Cover amount < 0 :: Sell/Short Market order: order(sid,amount) Limit order: order(sid,amount, limit_price) Stop order: order(sid,amount, None, stop_price) StopLimit order: order(sid,amount, limit_price, stop_price) """ # just validates amount and passes rest on to TransactionSimulator # Tell the user if they try to buy 0 shares of something. if amount == 0: zero_message = "Requested to trade zero shares of {psid}".format( psid=sid) log.debug(zero_message) # Don't bother placing orders for 0 shares. return order = Order( **{ 'dt': self.simulation_dt, 'sid': sid, 'amount': int(amount), 'filled': 0, 'stop': stop_price, 'limit': limit_price }) # Add non-zero orders to the order book. # !!!IMPORTANT SIDE-EFFECT!!! # This modifies the internal state of the transaction # simulator so that it can fill the placed order when it # receives its next message. err_str = self.order_book.place_order(order) if err_str is not None and len(err_str) > 0: # error, trade was not placed, log it out log.debug(err_str) def transform(self, stream_in): """ Main generator work loop. """ # Set the simulation date to be the first event we see. peek_date, peek_snapshot = next(stream_in) self.simulation_dt = peek_date # Stitch back together the generator by placing the peeked # event back in front stream = itertools.chain([(peek_date, peek_snapshot)], stream_in) # inject the current algo # snapshot time to any log record generated. with self.processor.threadbound(): for date, snapshot in stream: # We're still in the warmup period. Use the event to # update our universe, but don't yield any perf messages, # and don't send a snapshot to handle_data. if date < self.algo_start: for event in snapshot: del event['perf_messages'] self.update_universe(event) # Regular snapshot. Update the universe and send a snapshot # to handle data. 
else: for event in snapshot: for perf_message in event.perf_messages: # append current values of recorded vars # to emitted message perf_message['daily_perf']['recorded_vars'] =\ self.algo.recorded_vars yield perf_message del event['perf_messages'] self.update_universe(event) # Send the current state of the universe # to the user's algo. self.simulate_snapshot(date) perf_messages, risk_message = \ self.perf_tracker.handle_simulation_end() for message in perf_messages: message['daily_perf']['recorded_vars'] =\ self.algo.recorded_vars yield message yield risk_message def update_universe(self, event): """ Update the universe with new event information. """ # Update our portfolio. self.algo.set_portfolio(event.portfolio) # the portfolio is modified by each event passed into the # performance tracker (prices and amounts can change). # Performance tracker sends back an up-to-date portfolio # with each event. However, we provide the portfolio to # the algorithm via a setter method, rather than as part # of the event data sent to handle_data. To avoid # confusion, we remove it from the event here. del event.portfolio # Update our knowledge of this event's sid sid_data = self.universe[event.sid] sid_data.__dict__.update(event.__dict__) def simulate_snapshot(self, date): """ Run the user's algo against our current snapshot and update the algo's simulated time. """ # Needs to be set so that we inject the proper date into algo # log/print lines. self.snapshot_dt = date self.algo.set_datetime(self.snapshot_dt) self.algo.handle_data(self.universe) # Update the simulation time. self.simulation_dt = date
class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {'minute': 'minute_perf',
                                'daily': 'daily_perf'}

    def get_hash(self):
        """
        There should only ever be one TSC in the system, so
        we don't bother passing args into the hash.
        """
        return self.__class__.__name__ + hash_args()

    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = self.sim_params.first_open
        self.algo_start = self.algo_start.replace(hour=0, minute=0,
                                                  second=0,
                                                  microsecond=0)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)

    @property
    def perf_key(self):
        return self.EMISSION_TO_PERF_KEY_MAP[
            self.algo.perf_tracker.emission_rate]

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Initialize the mkt_close
        mkt_close = self.algo.perf_tracker.market_close

        # inject the current algo
        # snapshot time to any log record generated.
        with self.processor.threadbound():

            updated = False
            bm_updated = False
            for date, snapshot in stream_in:
                self.algo.set_datetime(date)
                self.simulation_dt = date
                self.algo.perf_tracker.set_date(date)
                self.algo.blotter.set_date(date)
                # If we're still in the warmup period, use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        if event.type in (DATASOURCE_TYPE.TRADE,
                                          DATASOURCE_TYPE.CUSTOM):
                            self.update_universe(event)
                        self.algo.perf_tracker.process_event(event)

                else:
                    events = []
                    for event in snapshot:
                        if event.type in (DATASOURCE_TYPE.TRADE,
                                          DATASOURCE_TYPE.CUSTOM):
                            self.update_universe(event)
                            updated = True
                        if event.type == DATASOURCE_TYPE.BENCHMARK:
                            self.algo.set_datetime(event.dt)
                            bm_updated = True
                        # Save events to stream through blotter below.
                        events.append(event)

                    # Update our portfolio.
                    self.algo.set_portfolio(
                        self.algo.perf_tracker.get_portfolio())

                    # Send the current state of the universe
                    # to the user's algo.
                    if updated:
                        self.algo.handle_data(self.current_data)
                        updated = False

                        # run orders placed in the algorithm call
                        # above through perf tracker before emitting
                        # the perf packet, so that the perf includes
                        # placed orders
                        for order in self.algo.blotter.new_orders:
                            self.algo.perf_tracker.process_event(order)
                        self.algo.blotter.new_orders = []

                    # Fill orders
                    for event in events:
                        process_trade = self.algo.blotter.process_trade
                        for txn, order in process_trade(event):
                            self.algo.perf_tracker.process_event(txn)
                            self.algo.perf_tracker.process_event(order)
                        self.algo.perf_tracker.process_event(event)

                    # The benchmark is our internal clock. When it
                    # updates, we need to emit a performance message.
                    if bm_updated:
                        bm_updated = False
                        yield self.get_message(date)

                    # When emitting minutely, we re-iterate the day as a
                    # packet with the entire day's performance rolled up.
if self.algo.perf_tracker.emission_rate == 'minute': if date == mkt_close: daily_rollup = self.algo.perf_tracker.to_dict( emission_type='daily') daily_rollup['daily_perf']['recorded_vars'] = \ self.algo.recorded_vars yield daily_rollup tp = self.algo.perf_tracker.todays_performance tp.rollover() if mkt_close < self.algo.perf_tracker.last_close: _, mkt_close = \ trading.environment.next_open_and_close( mkt_close ) self.algo.perf_tracker.handle_intraday_close() risk_message = self.algo.perf_tracker.handle_simulation_end() yield risk_message def get_message(self, date): rvars = self.algo.recorded_vars if self.algo.perf_tracker.emission_rate == 'daily': perf_message = \ self.algo.perf_tracker.handle_market_close() perf_message['daily_perf']['recorded_vars'] = rvars return perf_message elif self.algo.perf_tracker.emission_rate == 'minute': self.algo.perf_tracker.handle_minute_close(date) perf_message = self.algo.perf_tracker.to_dict() perf_message['minute_perf']['recorded_vars'] = rvars return perf_message def update_universe(self, event): """ Update the universe with new event information. """ # Update our knowledge of this event's sid sid_data = self.current_data[event.sid] sid_data.__dict__.update(event.__dict__)
class AlgorithmSimulator(object):

    def __init__(self, order_book, algo, algo_start):

        # ==========
        # Algo Setup
        # ==========

        # We extract the order book from the txn client so that
        # the algo can place new orders.
        self.order_book = order_book

        self.algo = algo
        self.algo_start = algo_start.replace(hour=0, minute=0,
                                             second=0,
                                             microsecond=0)

        # Monkey patch the user algorithm to place orders in the
        # TransactionSimulator's order book and use our logger.
        self.algo.set_order(self.order)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's universe as of our most recent event.
        # We want an ndict that will have empty ndicts as default
        # values on missing keys.
        self.universe = ndict(internal=defaultdict(ndict))

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None
        self.snapshot_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            record.extra["algo_dt"] = self.snapshot_dt
        self.processor = Processor(inject_algo_dt)

    def order(self, sid, amount):
        """
        Closure to pass into the user's algo to allow placing orders
        into the transaction simulator's dict of open orders.
        """
        order = ndict({"dt": self.simulation_dt,
                       "sid": sid,
                       "amount": int(amount),
                       "filled": 0})

        # Tell the user if they try to buy 0 shares of something.
        if order.amount == 0:
            zero_message = "Requested to trade zero shares of {sid}".format(
                sid=order.sid)
            log.debug(zero_message)
            # Don't bother placing orders for 0 shares.
            return

        # Add non-zero orders to the order book.
        # !!!IMPORTANT SIDE-EFFECT!!!
        # This modifies the internal state of the transaction
        # simulator so that it can fill the placed order when it
        # receives its next message.
        self.order_book.place_order(order)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # inject the current algo
        # snapshot time to any log record generated.
        with self.processor.threadbound():

            # Group together events with the same dt field. This depends on
            # the events already being sorted.
            for date, snapshot in groupby(stream_in, attrgetter("dt")):

                # Set the simulation date to be the first event we see.
                # This should only occur once, at the start of the test.
                if self.simulation_dt is None:
                    self.simulation_dt = date

                # Done message has the risk report, so we yield before
                # exiting.  A bare return ends the generator cleanly; under
                # PEP 479, raising StopIteration here would be an error.
                if date == "DONE":
                    for event in snapshot:
                        yield event.perf_message
                    return

                # We're still in the warmup period.  Use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                elif date < self.algo_start:
                    for event in snapshot:
                        del event["perf_message"]
                        self.update_universe(event)

                # The algo has taken so long to process events that
                # its simulated time is later than the event time.
                # Update the universe and yield any perf messages
                # encountered, but don't call handle_data.
                elif date < self.simulation_dt:
                    for event in snapshot:
                        # Only yield if we have something interesting to say.
                        if event.perf_message is not None:
                            yield event.perf_message

                        # Delete the message before updating,
                        # so we don't send it to the user.
                        del event["perf_message"]
                        self.update_universe(event)

                # Regular snapshot.  Update the universe and send a snapshot
                # to handle data.
                else:
                    for event in snapshot:
                        # Only yield if we have something interesting to say.
                        if event.perf_message is not None:
                            yield event.perf_message
                        del event["perf_message"]
                        self.update_universe(event)

                    # Send the current state of the universe
                    # to the user's algo.
                    self.simulate_snapshot(date)

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our portfolio.
        self.algo.set_portfolio(event.portfolio)

        # Update our knowledge of this event's sid
        for field in event.keys():
            self.universe[event.sid][field] = event[field]

    def simulate_snapshot(self, date):
        """
        Run the user's algo against our current snapshot and update
        the algo's simulated time.
        """
        # Needs to be set so that we inject the proper date into algo
        # log/print lines.
        self.snapshot_dt = date
        self.algo.set_datetime(self.snapshot_dt)

        start_tic = datetime.now()
        self.algo.handle_data(self.universe)
        stop_tic = datetime.now()

        # How long did handle_data take to run?
        delta = stop_tic - start_tic

        # Update the simulation time, charging the algo for its own
        # processing time.
        self.simulation_dt = date + delta
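# The transform loop above leans on itertools.groupby keyed by dt, which only
# merges *adjacent* equal keys; that is why it requires the event stream to
# be pre-sorted. A standalone illustration with stand-in events:
from collections import namedtuple
from itertools import groupby
from operator import attrgetter

Event = namedtuple('Event', 'dt sid')
stream = [Event(1, 'AAPL'), Event(1, 'MSFT'), Event(2, 'AAPL')]

for dt, snapshot in groupby(stream, attrgetter('dt')):
    print(dt, [event.sid for event in snapshot])
# 1 ['AAPL', 'MSFT']
# 2 ['AAPL']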
class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {'minute': 'minute_perf',
                                'daily': 'daily_perf'}

    def get_hash(self):
        """
        There should only ever be one TSC in the system, so
        we don't bother passing args into the hash.
        """
        return self.__class__.__name__ + hash_args()

    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = normalize_date(self.sim_params.first_open)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)

    @property
    def perf_key(self):
        return self.EMISSION_TO_PERF_KEY_MAP[
            self.algo.perf_tracker.emission_rate]

    def process_event(self, event):
        process_trade = self.algo.blotter.process_trade
        for txn, order in process_trade(event):
            self.algo.perf_tracker.process_event(txn)
            self.algo.perf_tracker.process_event(order)
        self.algo.perf_tracker.process_event(event)

    def transform(self, stream_in):
        """
        Main generator work loop.
        """
        # Initialize the mkt_close
        mkt_open = self.algo.perf_tracker.market_open
        mkt_close = self.algo.perf_tracker.market_close

        # inject the current algo
        # snapshot time to any log record generated.
        with self.processor.threadbound():
            data_frequency = self.sim_params.data_frequency

            self._call_before_trading_start(mkt_open)

            for date, snapshot in stream_in:

                self.simulation_dt = date
                self.on_dt_changed(date)

                # If we're still in the warmup period, use the event to
                # update our universe, but don't yield any perf messages,
                # and don't send a snapshot to handle_data.
                if date < self.algo_start:
                    for event in snapshot:
                        if event.type == DATASOURCE_TYPE.SPLIT:
                            self.algo.blotter.process_split(event)
                        elif event.type in (DATASOURCE_TYPE.TRADE,
                                            DATASOURCE_TYPE.CUSTOM):
                            self.update_universe(event)
                        self.algo.perf_tracker.process_event(event)
                else:
                    message = self._process_snapshot(
                        date,
                        snapshot,
                        self.algo.instant_fill,
                    )
                    # Perf messages are only emitted if the snapshot contained
                    # a benchmark event.
                    if message is not None:
                        yield message

                    # When emitting minutely, we re-iterate the day as a
                    # packet with the entire day's performance rolled up.
                    if date == mkt_close:
                        if self.algo.perf_tracker.emission_rate == 'minute':
                            daily_rollup = self.algo.perf_tracker.to_dict(
                                emission_type='daily')
                            daily_rollup['daily_perf']['recorded_vars'] = \
                                self.algo.recorded_vars
                            yield daily_rollup
                            tp = self.algo.perf_tracker.todays_performance
                            tp.rollover()

                        if mkt_close <= self.algo.perf_tracker.last_close:
                            before_last_close = \
                                mkt_close < self.algo.perf_tracker.last_close
                            try:
                                mkt_open, mkt_close = \
                                    trading.environment \
                                           .next_open_and_close(mkt_close)

                            except trading.NoFurtherDataError:
                                # If at the end of backtest history,
                                # skip advancing market close.
                                pass

                            if (self.algo.perf_tracker.emission_rate ==
                                    'minute'):
                                self.algo.perf_tracker\
                                    .handle_intraday_market_close(
                                        mkt_open,
                                        mkt_close)

                                if before_last_close:
                                    self._call_before_trading_start(mkt_open)

                    elif data_frequency == 'daily':
                        next_day = trading.environment.next_trading_day(date)

                        if (next_day is not None and
                                next_day < self.algo.perf_tracker.last_close):
                            self._call_before_trading_start(next_day)

                    self.algo.portfolio_needs_update = True
                    self.algo.account_needs_update = True
                    self.algo.performance_needs_update = True

            risk_message = self.algo.perf_tracker.handle_simulation_end()
            yield risk_message

    def _process_snapshot(self, dt, snapshot, instant_fill):
        """
        Process a stream of events corresponding to a single datetime,
        possibly returning a perf message to be yielded.

        If @instant_fill = True, we delay processing of events until after
        the user's call to handle_data, and we process the user's placed
        orders before the snapshot's events.  Note that this introduces a
        lookahead bias, since the user is effectively placing orders that are
        filled based on trades that happened prior to the call to
        handle_data.

        If @instant_fill = False, we process Trade events before calling
        handle_data.  This means that orders are filled based on trades
        occurring in the next snapshot.  This is the more conservative model,
        and as such it is the default behavior in TradingAlgorithm.
        """

        # Flags indicating whether we saw any events of type TRADE and type
        # BENCHMARK.  Respectively, these control whether or not handle_data
        # is called for this snapshot and whether we emit a perf message for
        # this snapshot.
        any_trade_occurred = False
        benchmark_event_occurred = False

        if instant_fill:
            events_to_be_processed = []

        for event in snapshot:
            if event.type == DATASOURCE_TYPE.TRADE:
                self.update_universe(event)
                any_trade_occurred = True
            elif event.type == DATASOURCE_TYPE.BENCHMARK:
                benchmark_event_occurred = True
            elif event.type == DATASOURCE_TYPE.CUSTOM:
                self.update_universe(event)
            elif event.type == DATASOURCE_TYPE.SPLIT:
                self.algo.blotter.process_split(event)

            if not instant_fill:
                self.process_event(event)
            else:
                events_to_be_processed.append(event)

        if any_trade_occurred:
            new_orders = self._call_handle_data()
            for order in new_orders:
                self.algo.perf_tracker.process_event(order)

        if instant_fill:
            # Now that handle_data has been called and orders have been
            # placed, process the event stream to fill user orders based on
            # the events from this snapshot.
            for event in events_to_be_processed:
                self.process_event(event)

        if benchmark_event_occurred:
            return self.get_message(dt)
        else:
            return None

    def _call_handle_data(self):
        """
        Call the user's handle_data, returning any orders placed by the algo
        during the call.
        """
        self.algo.event_manager.handle_data(
            self.algo,
            self.current_data,
            self.simulation_dt,
        )
        orders = self.algo.blotter.new_orders
        self.algo.blotter.new_orders = []
        return orders

    def _call_before_trading_start(self, dt):
        dt = normalize_date(dt)
        self.simulation_dt = dt
        self.on_dt_changed(dt)
        self.algo.before_trading_start()

    def on_dt_changed(self, dt):
        if self.algo.datetime != dt:
            self.algo.on_dt_changed(dt)

    def get_message(self, dt):
        """
        Get a perf message for the given datetime.
        """
        # Ensure that updated_portfolio has been called at least once for
        # this dt before we emit a perf message.  This is a no-op if
        # updated_portfolio has already been called this dt.
        self.algo.updated_portfolio()
        self.algo.updated_account()

        rvars = self.algo.recorded_vars
        if self.algo.perf_tracker.emission_rate == 'daily':
            perf_message = \
                self.algo.perf_tracker.handle_market_close_daily()
            perf_message['daily_perf']['recorded_vars'] = rvars
            return perf_message
        elif self.algo.perf_tracker.emission_rate == 'minute':
            self.algo.perf_tracker.handle_minute_close(dt)
            perf_message = self.algo.perf_tracker.to_dict()
            perf_message['minute_perf']['recorded_vars'] = rvars
            return perf_message

    def update_universe(self, event):
        """
        Update the universe with new event information.
        """
        # Update our knowledge of this event's sid.
        # Rather than using `if event.sid in ...`, trying and handling the
        # exception is significantly faster.
        try:
            sid_data = self.current_data[event.sid]
        except KeyError:
            sid_data = self.current_data[event.sid] = SIDData(event.sid)

        sid_data.__dict__.update(event.__dict__)
# GMail mailbox handler
ghandler = GMailHandler(
    account_id="*****@*****.**",
    password="******",
    recipients=["*****@*****.**"],
    format_string=formatstr
)


def main():
    mylog = Logger("MailHandler-APP")
    mylog.info("mailhandler message")


def inject_other_info(record):
    record.extra['myscret'] = "do not tell you"
    record.extra.update(
        # some other info; note that update() overwrites the
        # 'myscret' key set just above
        pp="pp info",
        ip="127.0.0.1",
        url="http://logbook.readthedocs.io/en/stable/",
        method="GET",
        myscret="do not tell you yet"
    )


if __name__ == "__main__":
    with ghandler.threadbound():
        with Processor(callback=inject_other_info).threadbound():
            main()
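# formatstr is referenced above but defined elsewhere; for the injected
# extras to show up in the mail body, it would need to reference them. A
# hedged guess at its shape (an assumption, not the original definition):
formatstr = ('[{record.time:%Y-%m-%d %H:%M}] {record.level_name}: '
             '{record.message} ip={record.extra[ip]} '
             'url={record.extra[url]}')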