def test_orders_stop(self, name, order_data, event_data, expected):
    data = order_data
    data['asset'] = self.ASSET133
    order = Order(**data)

    if expected['transaction']:
        expected['transaction']['asset'] = self.ASSET133
    event_data['asset'] = self.ASSET133

    assets = (
        (133, pd.DataFrame(
            {
                'open': [event_data['open']],
                'high': [event_data['high']],
                'low': [event_data['low']],
                'close': [event_data['close']],
                'volume': [event_data['volume']],
            },
            index=[pd.Timestamp('2006-01-05 14:31', tz='UTC')],
        )),
    )
    days = pd.date_range(
        start=normalize_date(self.minutes[0]),
        end=normalize_date(self.minutes[-1])
    )
    with tmp_bcolz_equity_minute_bar_reader(
            self.trading_calendar, days, assets) as reader:
        data_portal = DataPortal(
            self.env.asset_finder, self.trading_calendar,
            first_trading_day=reader.first_trading_day,
            equity_minute_reader=reader,
        )

        slippage_model = VolumeShareSlippage()

        try:
            dt = pd.Timestamp('2006-01-05 14:31', tz='UTC')
            bar_data = BarData(
                data_portal,
                lambda: dt,
                self.sim_params.data_frequency,
                self.trading_calendar,
                NoRestrictions(),
            )
            _, txn = next(slippage_model.simulate(
                bar_data,
                self.ASSET133,
                [order],
            ))
        except StopIteration:
            txn = None

        if expected['transaction'] is None:
            self.assertIsNone(txn)
        else:
            self.assertIsNotNone(txn)
            for key, value in expected['transaction'].items():
                self.assertEqual(value, txn[key])

def _pipeline_output(self, pipeline, chunks, name):
    # This method is taken from TradingAlgorithm.
    """
    Internal implementation of `pipeline_output`.

    For live algos we have to use the previous session: without it the
    pipeline won't work, since it would extrapolate and try to get data
    for get_datetime(), which is today.
    """
    today = normalize_date(self.get_datetime())
    prev_session = normalize_date(
        self.trading_calendar.previous_open(today))

    log.info('previous session in _pipeline_output: {}'.format(prev_session))

    try:
        data = self._pipeline_cache.get(name, prev_session)
    except KeyError:
        # Calculate the next block.
        data, valid_until = self.run_pipeline(
            pipeline, prev_session, next(chunks),
        )
        self._pipeline_cache.set(name, data, valid_until)

    # Now that we have a cached result, try to return the data for the
    # previous session.
    try:
        return data.loc[prev_session]
    except KeyError:
        # This happens if no assets passed the pipeline screen on a given
        # day.
        return pd.DataFrame(index=[], columns=data.columns)

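# The cache interaction above follows an "expiring cache" pattern: each
# entry carries a valid-until session, and a stale or missing read raises
# KeyError so the caller recomputes via run_pipeline. A minimal sketch of
# that pattern, assuming a plain dict-backed stand-in (hypothetical) rather
# than zipline's actual pipeline cache:
class SimpleExpiringCache:
    def __init__(self):
        self._entries = {}  # name -> (data, valid_until)

    def get(self, name, session):
        data, valid_until = self._entries[name]  # KeyError if never computed
        if session > valid_until:
            # Entry has expired; evict it and signal a miss so the caller
            # recomputes.
            del self._entries[name]
            raise KeyError(name)
        return data

    def set(self, name, data, valid_until):
        self._entries[name] = (data, valid_until)
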
def __init__(self, start_session, end_session, trading_calendar,
             capital_base=DEFAULT_CAPITAL_BASE,
             emission_rate='daily',
             data_frequency='daily',
             arena='backtest'):
    assert type(start_session) == pd.Timestamp
    assert type(end_session) == pd.Timestamp

    assert trading_calendar is not None, \
        "Must pass in trading calendar!"
    assert start_session <= end_session, \
        "Period start falls after period end."
    assert start_session <= trading_calendar.last_trading_session, \
        "Period start falls after the last known trading day."
    assert end_session >= trading_calendar.first_trading_session, \
        "Period end falls before the first known trading day."

    # chop off any minutes or hours on the given start and end dates,
    # as we only support session labels here (and we represent session
    # labels as midnight UTC).
    self._start_session = normalize_date(start_session)
    self._end_session = normalize_date(end_session)
    self._capital_base = capital_base

    self._emission_rate = emission_rate
    self._data_frequency = data_frequency

    # copied to algorithm's environment for runtime access
    self._arena = arena

    self._trading_calendar = trading_calendar

    if not trading_calendar.is_session(self._start_session):
        # if the start date is not a valid session in this calendar,
        # push it forward to the first valid session
        self._start_session = trading_calendar.minute_to_session_label(
            self._start_session
        )

    if not trading_calendar.is_session(self._end_session):
        # if the end date is not a valid session in this calendar,
        # pull it backward to the last valid session before the given
        # end date.
        self._end_session = trading_calendar.minute_to_session_label(
            self._end_session, direction="previous"
        )

    self._first_open = trading_calendar.open_and_close_for_session(
        self._start_session
    )[0]
    self._last_close = trading_calendar.open_and_close_for_session(
        self._end_session
    )[1]

def __init__(self, start_session, end_session, trading_calendar,
             capital_base=DEFAULT_CAPITAL_BASE,
             emission_rate='daily',
             data_frequency='daily',
             arena='backtest',
             execution_id=None):
    assert type(start_session) == pd.Timestamp
    assert type(end_session) == pd.Timestamp

    assert trading_calendar is not None, \
        "Must pass in trading calendar!"
    assert start_session <= end_session, \
        "Period start falls after period end."
    assert start_session <= trading_calendar.last_trading_session, \
        "Period start falls after the last known trading day."
    assert end_session >= trading_calendar.first_trading_session, \
        "Period end falls before the first known trading day."

    # chop off any minutes or hours on the given start and end dates,
    # as we only support session labels here (and we represent session
    # labels as midnight UTC).
    self._start_session = normalize_date(start_session)
    self._end_session = normalize_date(end_session)
    self._capital_base = capital_base

    if execution_id:
        self._execution_id = execution_id

    self._emission_rate = emission_rate
    self._data_frequency = data_frequency

    # copied to algorithm's environment for runtime access
    self._arena = arena

    self._trading_calendar = trading_calendar

    if not trading_calendar.is_session(self._start_session):
        # if the start date is not a valid session in this calendar,
        # push it forward to the first valid session
        self._start_session = trading_calendar.minute_to_session_label(
            self._start_session)

    if not trading_calendar.is_session(self._end_session):
        # if the end date is not a valid session in this calendar,
        # pull it backward to the last valid session before the given
        # end date.
        self._end_session = trading_calendar.minute_to_session_label(
            self._end_session, direction="previous")

    self._first_open = trading_calendar.open_and_close_for_session(
        self._start_session)[0]
    self._last_close = trading_calendar.open_and_close_for_session(
        self._end_session)[1]

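# For illustration, the session snapping above behaves like the sketch
# below. The calendar name and dates are assumptions, and the sketch uses
# the standalone trading_calendars package rather than anything defined in
# this module.
from trading_calendars import get_calendar
import pandas as pd

cal = get_calendar('NYSE')

# 2006-01-01 is a Sunday, so it is not a session; with the default
# direction ("next"), minute_to_session_label pushes it forward to the
# first valid session after it.
start = pd.Timestamp('2006-01-01', tz='UTC')
if not cal.is_session(start):
    start = cal.minute_to_session_label(start)

# An end date falling on a non-session is pulled backward instead.
end = pd.Timestamp('2006-01-08', tz='UTC')  # also a Sunday
if not cal.is_session(end):
    end = cal.minute_to_session_label(end, direction="previous")
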
def rebalance(context, data):
    log.info("rebalance: " + str(normalize_date(get_datetime())))

    # Pipeline data will be a dataframe with boolean columns named 'longs'
    # and 'shorts'.
    pipeline_data = context.pipeline_data.dropna()
    log.info(pipeline_data.head())

    all_assets = pipeline_data.index
    longs = all_assets[pipeline_data.longs]
    shorts = all_assets[pipeline_data.shorts]
    record(universe_size=len(all_assets))

    # Build a 2x-leveraged, equal-weight, long-short portfolio.
    one_third = 1.0 / 3.0
    for asset in longs:
        order_target_percent(asset, one_third)

    for asset in shorts:
        order_target_percent(asset, -one_third)

    # Remove any assets that should no longer be in our portfolio.
    portfolio_assets = longs | shorts
    positions = context.portfolio.positions
    for asset in viewkeys(positions) - set(portfolio_assets):
        # This will fail if the asset was removed from our portfolio
        # because it was delisted.
        if data.can_trade(asset):
            order_target_percent(asset, 0)

def handle_data(context, data):
    # `assets`, `set_screen`, `vwaps`, `vwap_key`, and `self` are free
    # variables resolved from the enclosing test scope.
    today = normalize_date(get_datetime())
    results = pipeline_output('test')
    expect_over_300 = {
        AAPL: today < self.AAPL_split_date,
        MSFT: False,
        BRK_A: True,
    }
    for asset in assets:
        should_pass_filter = expect_over_300[asset]
        if set_screen and not should_pass_filter:
            self.assertNotIn(asset, results.index)
            continue

        asset_results = results.loc[asset]
        self.assertEqual(asset_results['filter'], should_pass_filter)
        for length in vwaps:
            computed = results.loc[asset, vwap_key(length)]
            expected = vwaps[length][asset].loc[today]
            # Only having two places of precision here is a bit
            # unfortunate.
            assert_almost_equal(computed, expected, decimal=2)

def record_vars(context, data):
    """
    Plot variables at the end of each day.
    """
    log.info("record_vars: " + str(normalize_date(get_datetime())))

def _get_adjustments_in_range(self, asset, dts, field):
    """
    Get the Float64Multiply objects to pass to an AdjustedArrayWindow.

    For use by an AdjustedArrayWindow in the loader, which looks back
    from the current simulation time to a window of data, the dictionary
    is structured with:
    - the key into the dictionary for adjustments is the location of the
      day from which the window is being viewed.
    - the start of all multiply objects is always 0 (in each window all
      adjustments are overlapping)
    - the end of the multiply object is the location before the calendar
      location of the adjustment action, making all days before the event
      adjusted.

    Parameters
    ----------
    asset : Asset
        The asset for which to get adjustments.
    dts : iterable of datetime64-like
        The dts for which adjustment data is needed.
    field : str
        OHLCV field for which to get the adjustments.

    Returns
    -------
    out : dict[loc -> Float64Multiply]
        The adjustments as a dict of loc -> Float64Multiply
    """
    sid = int(asset)
    start = normalize_date(dts[0])
    end = normalize_date(dts[-1])
    adjs = {}
    if field != 'volume' and field != 'open_interest':
        mergers = self._adjustments_reader.get_adjustments_for_sid(
            'mergers', sid)
        for m in mergers:
            dt = m[0]
            if start < dt <= end:
                end_loc = dts.searchsorted(dt)
                adj_loc = end_loc
                mult = Float64Multiply(0, end_loc - 1, 0, 0, m[1])
                try:
                    adjs[adj_loc].append(mult)
                except KeyError:
                    adjs[adj_loc] = [mult]
        divs = self._adjustments_reader.get_adjustments_for_sid(
            'dividends', sid)
        for d in divs:
            dt = d[0]
            if start < dt <= end:
                end_loc = dts.searchsorted(dt)
                adj_loc = end_loc
                mult = Float64Multiply(0, end_loc - 1, 0, 0, d[1])
                try:
                    adjs[adj_loc].append(mult)
                except KeyError:
                    adjs[adj_loc] = [mult]
    splits = self._adjustments_reader.get_adjustments_for_sid(
        'splits', sid)
    for s in splits:
        dt = s[0]
        if start < dt <= end:
            if field == 'volume' or field == 'open_interest':
                ratio = 1.0 / s[1]
            else:
                ratio = s[1]
            end_loc = dts.searchsorted(dt)
            adj_loc = end_loc
            mult = Float64Multiply(0, end_loc - 1, 0, 0, ratio)
            try:
                adjs[adj_loc].append(mult)
            except KeyError:
                adjs[adj_loc] = [mult]
    return adjs

def _get_adjustments_in_range(self, asset, dts, field):
    """
    Get the Float64Multiply objects to pass to an AdjustedArrayWindow.

    For use by an AdjustedArrayWindow in the loader, which looks back
    from the current simulation time to a window of data, the dictionary
    is structured with:
    - the key into the dictionary for adjustments is the location of the
      day from which the window is being viewed.
    - the start of all multiply objects is always 0 (in each window all
      adjustments are overlapping)
    - the end of the multiply object is the location before the calendar
      location of the adjustment action, making all days before the event
      adjusted.

    Parameters
    ----------
    asset : Asset
        The asset for which to get adjustments.
    dts : iterable of datetime64-like
        The dts for which adjustment data is needed.
    field : str
        OHLCV field for which to get the adjustments.

    Returns
    -------
    out : dict[loc -> Float64Multiply]
        The adjustments as a dict of loc -> Float64Multiply
    """
    sid = int(asset)
    start = normalize_date(dts[0])
    end = normalize_date(dts[-1])
    adjs = {}
    if field != 'volume':
        mergers = self._adjustments_reader.get_adjustments_for_sid(
            'mergers', sid)
        for m in mergers:
            dt = m[0]
            if start < dt <= end:
                end_loc = dts.searchsorted(dt)
                adj_loc = end_loc
                mult = Float64Multiply(0, end_loc - 1, 0, 0, m[1])
                try:
                    adjs[adj_loc].append(mult)
                except KeyError:
                    adjs[adj_loc] = [mult]
        divs = self._adjustments_reader.get_adjustments_for_sid(
            'dividends', sid)
        for d in divs:
            dt = d[0]
            if start < dt <= end:
                end_loc = dts.searchsorted(dt)
                adj_loc = end_loc
                mult = Float64Multiply(0, end_loc - 1, 0, 0, d[1])
                try:
                    adjs[adj_loc].append(mult)
                except KeyError:
                    adjs[adj_loc] = [mult]
    splits = self._adjustments_reader.get_adjustments_for_sid(
        'splits', sid)
    for s in splits:
        dt = s[0]
        if start < dt <= end:
            if field == 'volume':
                ratio = 1.0 / s[1]
            else:
                ratio = s[1]
            end_loc = dts.searchsorted(dt)
            adj_loc = end_loc
            mult = Float64Multiply(0, end_loc - 1, 0, 0, ratio)
            try:
                adjs[adj_loc].append(mult)
            except KeyError:
                adjs[adj_loc] = [mult]
    return adjs

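# To make the dict layout above concrete: an adjustment landing inside the
# window yields one multiplier covering every row before the event. The
# dates, the split ratio, and the namedtuple stand-in for zipline's
# Float64Multiply (which shares the (first_row, last_row, first_col,
# last_col, value) field order) are assumptions for illustration.
from collections import namedtuple

import pandas as pd

Float64Multiply = namedtuple(
    'Float64Multiply', 'first_row last_row first_col last_col value')

# A five-day window with a 2:1 split taking effect on the third day.
dts = pd.date_range('2006-01-03', periods=5, tz='UTC')
split_dt, ratio = dts[2], 0.5

# searchsorted gives the calendar location of the event (here 2), so the
# multiplier spans rows 0 .. end_loc - 1: every day *before* the split.
end_loc = dts.searchsorted(split_dt)
adjs = {end_loc: [Float64Multiply(0, end_loc - 1, 0, 0, ratio)]}
# adjs maps location 2 -> [Float64Multiply(0, 1, 0, 0, 0.5)]
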
def test_orders_stop(self, name, order_data, event_data, expected):
    data = order_data
    data["asset"] = self.ASSET133
    order = Order(**data)

    if expected["transaction"]:
        expected["transaction"]["asset"] = self.ASSET133
    event_data["asset"] = self.ASSET133

    assets = (
        (
            133,
            pd.DataFrame(
                {
                    "open": [event_data["open"]],
                    "high": [event_data["high"]],
                    "low": [event_data["low"]],
                    "close": [event_data["close"]],
                    "volume": [event_data["volume"]],
                },
                index=[pd.Timestamp("2006-01-05 14:31", tz="UTC")],
            ),
        ),
    )
    days = pd.date_range(
        start=normalize_date(self.minutes[0]),
        end=normalize_date(self.minutes[-1]),
    )
    with tmp_bcolz_equity_minute_bar_reader(
        self.trading_calendar, days, assets
    ) as reader:
        data_portal = DataPortal(
            self.asset_finder,
            self.trading_calendar,
            first_trading_day=reader.first_trading_day,
            equity_minute_reader=reader,
        )

        slippage_model = VolumeShareSlippage()

        try:
            dt = pd.Timestamp("2006-01-05 14:31", tz="UTC")
            bar_data = BarData(
                data_portal,
                lambda: dt,
                self.sim_params.data_frequency,
                self.trading_calendar,
                NoRestrictions(),
            )
            _, txn = next(
                slippage_model.simulate(
                    bar_data,
                    self.ASSET133,
                    [order],
                )
            )
        except StopIteration:
            txn = None

        if expected["transaction"] is None:
            assert txn is None
        else:
            assert txn is not None
            for key, value in expected["transaction"].items():
                assert value == txn[key]