def _filter_fetched_history(self, context, data):
    """Down-sample fetched price history to the strategy's minute frequency.

    When running at minute data frequency, keeps only rows of
    ``self.state.prices`` that fall on the configured ``MINUTE_FREQ`` grid,
    then appends the current bar and persists state back into the catalyst
    context.
    """
    # Filter historic data according to minute frequency
    # for the freq alias:
    # http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
    if self.state.DATA_FREQ == "minute":
        # Grid of timestamps from the reference start to the newest price row,
        # spaced MINUTE_FREQ minutes apart.
        filter_dates = pd.date_range(
            start=self.date_init_reference,
            end=self.state.prices.iloc[-1].name,
            freq=str(self.state.MINUTE_FREQ) + "min",
        )
        # Keep only on-grid rows; .loc on missing labels yields NaN rows,
        # which the dropna() below removes.
        self.state.prices = self.state.prices.loc[filter_dates]
        self.state.prices = self.state.prices.dropna()
        if self.filter_dates is not None:
            # NOTE(review): this appends the symmetric difference of the old
            # and new grids onto the old grid — presumably to accumulate all
            # grid timestamps seen so far; confirm the intent (a plain
            # union() would be the more obvious formulation).
            self.filter_dates = self.filter_dates.append(
                self.filter_dates.symmetric_difference(filter_dates)
            )
        else:
            self.filter_dates = filter_dates
    # Add current values to historic
    self.last_date = get_datetime()
    self.state.prices.loc[self.last_date] = self.state.current
    # Persist updated state into the catalyst context for the next iteration.
    self.state.dump_to_context(context)
def handle_data(context, data): current_time = get_datetime().time() # When model is empty if len(context.model.get_renko_prices()) == 0: context.model = pyrenko.renko() history = data.history(context.asset, 'price', bar_count=context.n_history, frequency=context.tf) # Get daily absolute returns diffs = history.diff(context.diff_lag).abs() diffs = diffs[~np.isnan(diffs)] # Calculate IQR of daily returns iqr_diffs = np.percentile(diffs, [25, 75]) # Find the optimal brick size opt_bs = opt.fminbound(lambda x: -evaluate_renko( brick=x, history=history, column_name='score'), iqr_diffs[0], iqr_diffs[1], disp=0) # Build the model context.last_brick_size = opt_bs context.model.set_brick_size(brick_size=opt_bs, auto=False) context.model.build_history(prices=history) # Open a position order_target_percent( context.asset, context.leverage * context.model.get_renko_directions()[-1]) else: last_price = data.history( context.asset, 'price', bar_count=1, frequency='1440T', ) # Just for output and debug prev = context.model.get_renko_prices()[-1] prev_dir = context.model.get_renko_directions()[-1] num_created_bars = context.model.do_next(last_price) # If the last price moves in the backward direction we should rebuild the model if np.sign(context.portfolio.positions[context.asset].amount * context.model.get_renko_directions()[-1]) == -1: order_target_percent(context.asset, 0.0) context.model = pyrenko.renko() # or we cover the part of the position elif context.part_cover_ratio > 0.0 and num_created_bars != 0: order_target( context.asset, context.portfolio.positions[context.asset].amount * (1.0 - context.part_cover_ratio))
def handle_data(context, data):
    """Buy-and-hold: enter a full position once, exit on the end date.

    Records the current price and available cash on every bar.
    """
    latest_price = data.current(context.asset, 'price')
    record(price=latest_price, cash=context.portfolio.cash)

    # Enter the full position exactly once.
    if not context.bought:
        order_target_percent(context.asset, 1)
        context.bought = True

    # Liquidate everything once the configured end date is reached.
    if context.end_date == get_datetime().date():
        order_target_percent(context.asset, 0)
def calculate(self, df, namespace, **kw):
    """Reset signal flags and enqueue an asynchronous ML calculation job.

    Advances the iteration counter, logs the current window, and submits
    *df* to the ML task queue; the job id is stored for later polling.
    """
    # Reset per-iteration signal flags before the new calculation.
    self._signals_buy = False
    self._signals_sell = False

    self.idx += 1
    self.current_date = get_datetime()

    last_row = df.iloc[-1]
    self.log.info(str(self.idx) + ' - ' + str(self.current_date) + ' - ' +
                  str(last_row.price))
    self.log.info(str(df.iloc[0].name) + ' - ' + str(last_row.name))
    self.log.info(f'Queuing {self.name} ML calculation')

    # Submit the calculation to the background task queue and remember the
    # job id so the result can be collected later.
    job = tasks.enqueue_ml_calculate(
        df,
        namespace,
        self.name,
        self.idx,
        self.current_date,
        self.hyper_params,
        df_final=self.df_final,
        **kw,
    )
    self.current_job_id = job.id
def handle_data(context, data):
    """Pipeline-output assertion hook: each asset must appear in the
    pipeline results iff it existed both today and on the prior trading day.
    """
    results = pipeline_output('test')
    today = get_datetime().normalize()
    yesterday = today - self.trading_day

    for asset in self.assets:
        # Assets should appear iff they exist today and yesterday.
        if self.exists(today, asset) and self.exists(yesterday, asset):
            self.assertEqual(
                results.loc[asset, 'close'],
                self.expected_close(today, asset),
            )
        else:
            self.assertNotIn(asset, results.index)
def handle_data(context, data):
    """Pairs-trading iteration on assets A and B.

    Computes the z-score of the return spread A − B, closes an open pair
    when the z-score reverts through the exit threshold, and opens a new
    market-neutral pair when the spread is wide enough and the z-score
    crosses the entry threshold.
    """
    current_time = get_datetime().time()

    # Price windows for both legs over the modelling horizon.
    prices_a = data.history(
        context.A,
        'price',
        bar_count=context.n_modelling,
        frequency=context.tf,
    )
    prices_b = data.history(
        context.B,
        'price',
        bar_count=context.n_modelling,
        frequency=context.tf,
    )

    # Return spread and its current z-score.
    A_return = prices_a.pct_change()
    B_return = prices_b.pct_change()
    spread = A_return - B_return
    zscore = (spread.iloc[-1] - spread.mean()) / spread.std()

    # Close an open pair when the z-score reverts through the exit level.
    # Position in B is the sign marker for the pair (B long <=> A short).
    if context.portfolio.positions[
            context.B].amount < 0 and zscore >= -context.z_signal_out:
        order_target_percent(context.A, 0.0)
        order_target_percent(context.B, 0.0)
    if context.portfolio.positions[
            context.B].amount > 0 and zscore <= context.z_signal_out:
        order_target_percent(context.A, 0.0)
        order_target_percent(context.B, 0.0)

    # Check minimal allowed spread value before entering a new pair.
    if (abs(spread[-1]) >= context.min_spread
        ):  # and np.sign(A_return[-1] * B_return[-1]) < 0:
        # Long and Short positions for assets, half the leverage on each leg.
        if context.portfolio.positions[
                context.B].amount == 0 and zscore > context.z_signal_in:
            order_target_percent(context.A, -0.5 * context.leverage)
            order_target_percent(context.B, 0.5 * context.leverage)
        if context.portfolio.positions[
                context.B].amount == 0 and zscore < -context.z_signal_in:
            order_target_percent(context.A, 0.5 * context.leverage)
            order_target_percent(context.B, -0.5 * context.leverage)

    record(A_return=A_return[-1],
           B_return=B_return[-1],
           spread=spread[-1],
           zscore=zscore)
def handle_data(context, data):
    """Record rolling OHLC snapshots at 1m/1h granularity and print a daily
    progress line at midnight.
    """
    # Variables to record for a given asset: price and volume
    # Other options include 'open', 'high', 'open', 'close'
    # Please note that 'price' equals 'close'
    context.current_1m = data.history(context.asset,
                                      ['open', 'high', 'low', 'close'], 1,
                                      '1T')
    current_datetime = get_datetime()
    # if current_datetime.hour % 4 == 0 and current_datetime.minute == 0 and current_datetime.second == 0:
    #     context.current_4h = data.history(context.asset, ['open', 'high', 'low', 'close'], 1, '4H')
    # NOTE(review): `hour % 1 == 0` is always true, so this effectively
    # triggers on the first minute of every hour — confirm that is intended.
    if current_datetime.hour % 1 == 0 and current_datetime.minute == 0 and current_datetime.second == 0:
        context.current_1h = data.history(context.asset,
                                          ['open', 'high', 'low', 'close'], 1,
                                          '1H')
    if current_datetime.hour == 0 and current_datetime.minute == 0 and current_datetime.second == 0:
        # Store some information daily
        print('\nCurrent date is ' + str(get_datetime().date()))
        print('elapsed time(minute): ' +
              str((time.time() - context.start_time) / 60))
        #context.pricing_data_1m = context.pricing_data_1m.append(context.current_1m)
        context.pricing_data_1h = context.pricing_data_1h.append(
            context.current_1h)
# NOTE(review): stray opening triple-quote below — looks like the start of a
# commented-out block truncated by extraction; confirm against the full file.
'''
def _stop_loss_sell(self, context, position):
    """Liquidate *position* in response to a stop-loss signal.

    Places a market sell for the full position size, computes realized
    profit against the cost basis, then logs and notifies with the result.
    """
    # Market-sell the entire position (limit pricing intentionally disabled).
    order(
        asset=self.state.asset,
        amount=-position.amount,
        # limit_price=self.state.price * (1 - self.state.SLIPPAGE_ALLOWED),
    )

    # Realized P&L: proceeds at the current price minus the cost basis.
    profit = (self.state.price * position.amount) - (
        position.cost_basis * position.amount
    )

    message = "Sold {amount} @ {price} Profit: {profit}; Produced by stop-loss signal at {date}".format(
        date=get_datetime(),
        profit=profit,
        price=self.state.price,
        amount=position.amount,
    )
    self.log.notice(message)
    self.notify(dedent(message))
def handle_data(context, data):
    """Pipeline assertion hook: check the over-300 filter column and the
    VWAP columns of today's pipeline output against expected values.
    """
    today = normalize_date(get_datetime())
    results = pipeline_output('test')

    # Whether each asset is expected to pass the >300 price filter today.
    expect_over_300 = {
        AAPL: today < self.AAPL_split_date,
        MSFT: False,
        BRK_A: True,
    }

    for asset in assets:
        expected_pass = expect_over_300[asset]

        # Screened-out assets must be absent from the results entirely.
        if set_screen and not expected_pass:
            self.assertNotIn(asset, results.index)
            continue

        row = results.loc[asset]
        self.assertEqual(row['filter'], expected_pass)

        for length in vwaps:
            # Only having two places of precision here is a bit
            # unfortunate.
            assert_almost_equal(
                results.loc[asset, vwap_key(length)],
                vwaps[length][asset].loc[today],
                decimal=2,
            )
def handle_data(context, data):
    """Renko strategy iteration with daily rebuild logic.

    Empty model: fetch history, optimize the brick size from the IQR of
    absolute returns, rebuild the renko chart, open a position, and record
    diagnostics. Otherwise: advance the model with the latest daily price,
    record diagnostics, and flatten or partially cover the position.
    """
    current_time = get_datetime().time()
    if current_time.hour == 0 and current_time.minute == 0:
        print("Current date is " + str(get_datetime().date()))

    # we check if the model is empty we should get the data,
    # calculate IQR, optimize the brick size, build the Renko chart, and open the order.
    if len(context.model.get_renko_prices()) == 0:
        context.model = renko()
        history = data.history(context.asset,
                               "price",
                               bar_count=context.n_history,
                               frequency=context.tf)

        # Get daily absolute returns
        diffs = history.diff(1).abs()
        diffs = diffs[~np.isnan(diffs)]
        # Calculate Interquartile range of daily returns
        iqr_diffs = np.percentile(diffs, [25, 75])

        # Find the optimal brick size (maximize the renko 'score' between
        # the 25th and 75th percentile of absolute returns).
        opt_bs = opt.fminbound(
            lambda x: -evaluate_renko(
                brick=x, history=history, column_name="score"),
            iqr_diffs[0],
            iqr_diffs[1],
            disp=0,
        )

        # Build the model
        print("REBUILDING RENKO: " + str(opt_bs))
        context.last_brick_size = opt_bs
        context.model.set_brick_size(brick_size=opt_bs, auto=False)
        context.model.build_history(prices=history)

        # Open a position
        if context.model.get_renko_directions()[-1] == 1:
            order_target_percent(context.asset, 1)
        elif context.model.get_renko_directions()[-1] == -1:
            order_target_percent(context.asset, 0)

        # Open a position
        # NOTE(review): this second, unconditional order immediately
        # overrides the conditional orders above (same asset, new target) —
        # confirm whether both blocks are intended to run.
        order_target_percent(
            context.asset,
            context.leverage * context.model.get_renko_directions()[-1])

        # Store some information
        record(
            rebuilding_status=1,
            brick_size=context.last_brick_size,
            price=history[-1],
            renko_price=context.model.get_renko_prices()[-1],
            num_created_bars=0,
            amount=context.portfolio.positions[context.asset].amount,
        )
    else:
        # One daily bar ('1440T' = 1440 minutes).
        last_price = data.history(
            context.asset,
            "price",
            bar_count=1,
            frequency="1440T",
        )

        # Just for output and debug
        prev = context.model.get_renko_prices()[-1]
        prev_dir = context.model.get_renko_directions()[-1]
        num_created_bars = context.model.do_next(last_price)
        if num_created_bars != 0:
            print("New Renko bars created")
            print("last price: " + str(last_price))
            print("previous Renko price: " + str(prev))
            print("current Renko price: " +
                  str(context.model.get_renko_prices()[-1]))
            print("direction: " + str(prev_dir))
            print("brick size: " + str(context.model.brick_size))

        # Store some information
        record(
            rebuilding_status=0,
            brick_size=context.last_brick_size,
            price=last_price,
            renko_price=context.model.get_renko_prices()[-1],
            num_created_bars=num_created_bars,
            amount=context.portfolio.positions[context.asset].amount,
        )

        # If the last price moves in the backward direction we should rebuild the model
        if (np.sign(context.portfolio.positions[context.asset].amount *
                    context.model.get_renko_directions()[-1]) == -1):
            order_target_percent(context.asset, 0.0)
            context.model = renko()
        # or we cover the part of the position
        elif context.part_cover_ratio > 0.0 and num_created_bars != 0:
            order_target(
                context.asset,
                context.portfolio.positions[context.asset].amount *
                (1.0 - context.part_cover_ratio),
            )
def handle_data(context, data):
    """Daily ML-state strategy iteration.

    Runs once per day (first minute): fetches price/volume history, builds
    the feature frame, asks the model for the current market state, then
    goes long / flat / short according to the state's membership in the
    configured long/random/short state sets.

    Fix: the history fetch previously used a bare ``except:``, which also
    swallows ``SystemExit``/``KeyboardInterrupt``; narrowed to
    ``except Exception``.
    """
    current_date = get_datetime().date()
    current_time = get_datetime().time()

    # Just one time in a day (first minute)
    if current_time.hour == 0 and current_time.minute == 0 and current_time.second == 0:
        prices = pd.DataFrame()
        volumes = pd.DataFrame()
        try:
            prices = data.history(context.asset,
                                  fields='price',
                                  bar_count=context.n_periods,
                                  frequency=context.tf)
            volumes = data.history(context.asset,
                                   fields='volume',
                                   bar_count=context.n_periods,
                                   frequency=context.tf)
        except Exception:
            # Best-effort: on any data error, fall through with empty frames
            # (the shape check below then skips the iteration).
            print('NO DATA')

        # Proceed only with a complete window of both prices and volumes.
        if prices.shape[0] == context.n_periods and volumes.shape[0] == context.n_periods:
            # Build the feature frame consumed by the model.
            features = pd.DataFrame()
            features['price'] = prices
            features['volume'] = volumes
            features['last_return'] = features['price'].pct_change()
            features['std_normalized'] = features['price'].rolling(
                context.std_period).apply(std_normalized)
            features['ma_ratio'] = features['price'].rolling(
                context.ma_period).apply(ma_ratio)
            features['price_deviation'] = features['price'].rolling(
                context.price_deviation_period).apply(values_deviation)
            features['volume_deviation'] = features['volume'].rolling(
                context.volume_deviation_period).apply(values_deviation)

            # Default to the first "random" (neutral) state; only trust the
            # model when the feature frame has the expected number of
            # complete rows after rolling-window NaNs are dropped.
            state = context.random_states[0]
            if features.dropna().shape[0] == (context.n_periods - context.ma_period + 1):
                state = int(context.model.predict(
                    features[context.cols_features].dropna())[-1])
            else:
                print('PROBLEM: features dataframe is too small')

            print('State on ' + str(current_date) + ' ' + str(current_time) +
                  ': ' + str(state))
            print('Amount on ' + str(current_date) + ' ' + str(current_time) +
                  ': ' + str(context.portfolio.positions[context.asset].amount))
            print(prices.dropna())
            print(volumes.dropna())

            # Go long when flat/short and the state is a "long" state.
            if context.portfolio.positions[context.asset].amount <= 0 and state in context.long_states:
                print('LONG on ' + str(current_date) + ' ' + str(current_time))
                order_target_percent(context.asset, 1.0 * context.leverage)
                context.best_price_ts = data.current(context.asset, 'close')

            # Close any position when the state is neutral/"random".
            if context.portfolio.positions[context.asset].amount != 0 and state in context.random_states:
                print('CLOSE on ' + str(current_date) + ' ' + str(current_time))
                order_target_percent(context.asset, 0.0)

            # Go short when flat/long and the state is a "short" state.
            if context.portfolio.positions[context.asset].amount >= 0 and state in context.short_states:
                print('SHORT on ' + str(current_date) + ' ' + str(current_time))
                order_target_percent(context.asset, -1.0 * context.leverage)
                context.best_price_ts = data.current(context.asset, 'close')

            record(price=prices[-1],
                   state=state,
                   amount=context.portfolio.positions[context.asset].amount)
def handle_data(context, data):
    """Renko strategy iteration (daily-lag variant, diff lag of 24 bars).

    Empty model: optimize the brick size, rebuild, open a position, record
    diagnostics. Otherwise: advance the model with the latest daily price
    and flatten or partially cover the position.
    """
    current_time = get_datetime().time()
    if current_time.hour == 0 and current_time.minute == 0:
        print('Current date is ' + str(get_datetime().date()))

    # When model is empty
    if len(context.model.get_renko_prices()) == 0:
        context.model = pyrenko.renko()
        history = data.history(context.asset,
            'price',
            bar_count = context.n_history,
            frequency = context.tf
            )

        # Get daily absolute returns (24-bar lag — presumably 24 hourly
        # bars per day; TODO confirm against context.tf).
        diffs = history.diff(24).abs()
        diffs = diffs[~np.isnan(diffs)]

        # Calculate IQR of daily returns
        iqr_diffs = np.percentile(diffs, [25, 75])

        # Find the optimal brick size
        opt_bs = opt.fminbound(lambda x: -evaluate_renko(brick = x,
            history = history, column_name = 'score'),
            iqr_diffs[0], iqr_diffs[1], disp=0)

        # Build the model
        print('REBUILDING RENKO: ' + str(opt_bs))
        context.last_brick_size = opt_bs
        context.model.set_brick_size(brick_size = opt_bs, auto = False)
        context.model.build_history(prices = history)

        # Open a position in the direction of the last renko bar.
        order_target_percent(context.asset,
            context.leverage * context.model.get_renko_directions()[-1])

        # Store some information
        record(
            rebuilding_status = 1,
            brick_size = context.last_brick_size,
            price = history[-1],
            renko_price = context.model.get_renko_prices()[-1],
            num_created_bars = 0,
            amount = context.portfolio.positions[context.asset].amount
        )
    else:
        # One daily bar ('1440T' = 1440 minutes).
        last_price = data.history(context.asset,
            'price',
            bar_count = 1,
            frequency = '1440T',
            )

        # Just for output and debug
        prev = context.model.get_renko_prices()[-1]
        prev_dir = context.model.get_renko_directions()[-1]
        num_created_bars = context.model.do_next(last_price)
        if num_created_bars != 0:
            print('New Renko bars created')
            print('last price: ' + str(last_price))
            print('previous Renko price: ' + str(prev))
            print('current Renko price: ' +
                  str(context.model.get_renko_prices()[-1]))
            print('direction: ' + str(prev_dir))
            print('brick size: ' + str(context.model.brick_size))

        # Store some information
        record(
            rebuilding_status = 0,
            brick_size = context.last_brick_size,
            price = last_price,
            renko_price = context.model.get_renko_prices()[-1],
            num_created_bars = num_created_bars,
            amount = context.portfolio.positions[context.asset].amount
        )

        # If the last price moves in the backward direction we should rebuild the model
        if np.sign(context.portfolio.positions[context.asset].amount *
                   context.model.get_renko_directions()[-1]) == -1:
            order_target_percent(context.asset, 0.0)
            context.model = pyrenko.renko()
        # or we cover the part of the position
        elif context.part_cover_ratio > 0.0 and num_created_bars != 0:
            order_target(context.asset,
                         context.portfolio.positions[context.asset].amount *
                         (1.0 - context.part_cover_ratio))
def handle_data(context, data):
    """Renko + Bollinger-band strategy with position persistence to a DB.

    Rebuilds a renko model every bar from HLC history, opens a long when the
    last renko directions match the configured open trigger and the band
    range is wide enough, and closes on stop-loss or on the close trigger,
    recording each trade into the peewee ``Position`` table.
    """
    current_time = get_datetime().time()
    if current_time.hour == 0 and current_time.minute == 0:
        print('Current date is ' + str(get_datetime().date()))

    # Warm-up: wait until enough bars exist for the ATR window.
    context.i += 1
    if context.i < context.atr_time:
        return
    if not data.can_trade(context.asset):
        return

    starting_cash = context.portfolio.starting_cash
    current = data.current(context.asset, 'close')
    price = data.current(context.asset, 'price')
    last_price = data.history(context.asset,
                              'price',
                              bar_count=context.atr_time - 1,
                              frequency=context.tf)

    # NOTE(review): `order_vol` and `stop` are read from enclosing/module
    # scope — confirm where they are defined; the magic values gate the
    # hourly OHLCV dump to the database.
    if order_vol == 0.1 and stop == 0.9975:
        if context.i % 60 == 0:
            ohlcv_data = data.history(
                context.asset,
                fields=['open', 'high', 'low', 'close', 'volume'],
                bar_count=1,
                frequency='H')
            get_ohlcv(database=db,
                      exchange=exchange_name,
                      pair='BTCUSDT',
                      open=ohlcv_data.open,
                      high=ohlcv_data.high,
                      low=ohlcv_data.low,
                      close=ohlcv_data.close,
                      volume=ohlcv_data.volume,
                      timestamp=datetime.timestamp(get_datetime()))

    # Bollinger bands on closes; keep only the latest band values.
    bb_data = data.history(context.asset,
                           'close',
                           bar_count=context.bb,
                           frequency=context.tf)
    hlc_data = data.history(context.asset,
                            fields=['high', 'low', 'close'],
                            bar_count=context.atr_time,
                            frequency=context.tf)
    upperband, middleband, lowerband = talib.BBANDS(bb_data,
                                                    timeperiod=context.bb - 1,
                                                    nbdevup=2,
                                                    nbdevdn=2,
                                                    matype=0)
    upperband, middleband, lowerband = upperband[-1], middleband[
        -1], lowerband[-1]
    bb_range = upperband - lowerband

    record(price=price,
           starting_cash=starting_cash,
           cash=context.portfolio.cash,
           upperband=upperband,
           middleband=middleband,
           lowerband=lowerband,
           num_trades=context.num_trades,
           order_result=context.order_result)

    # Rebuild the renko model from scratch each bar (auto brick size from
    # HLC history).
    context.model = pyrenko.renko()
    optimal_brick = context.model.set_brick_size(HLC_history=hlc_data)
    context.model.build_history(prices=last_price)
    prev_dir = context.model.get_renko_directions()
    # Last three completed renko directions (the final element is the
    # in-progress bar and is excluded).
    last_dir = prev_dir[-4:-1]

    if not context.is_open:
        # Entry: direction pattern matches and the band range is wide enough.
        if last_dir == context.open_trigger and bb_range > 500:
            order_target_percent(context.asset,
                                 order_vol,
                                 limit_price=current * 1.001)
            context.is_open = True
            context.started = get_open_orders(context.asset)[-1].dt
            context.order_price = get_open_orders(context.asset)[-1].limit
            context.amount = get_open_orders(context.asset)[-1].amount
            # Persist the newly opened position.
            positions(db,
                      type=algo_type,
                      side='Buy',
                      start=context.started,
                      open_price=context.order_price,
                      finish=None,
                      close=None,
                      amount=context.amount,
                      status='Open',
                      closed_by=None,
                      exchange=exchange_name,
                      timestamp=datetime.timestamp(get_datetime()))
    else:
        # Stop-loss exit: price fell below the stop fraction of entry price.
        if current <= context.order_price * stop and stop != 0:
            close_id = order_target_percent(context.asset,
                                            0,
                                            limit_price=current)
            context.is_open = False
            context.num_trades += 1
            price_diff = current - context.order_price
            context.order_result.append(price_diff)
            context.finished = get_order(close_id).dt
            context.close_price = get_order(close_id).limit
            context.closed_by = 'Stop Loss'
            record(num_trades=context.num_trades,
                   order_result=context.order_result)
            # Mark the most recently inserted Position row as closed.
            query = Position.select(fn.MAX(Position.id))
            p = (Position.update({
                'finished': context.finished,
                'closed_price': context.close_price,
                'closed_by': context.closed_by,
                'status': 'Closed'
            }).where(Position.id == query.scalar()))
            p.execute()
        else:
            # Algorithmic exit: renko direction pattern matches close trigger.
            if last_dir == context.close_trigger:
                close_id = order_target_percent(context.asset,
                                                0,
                                                limit_price=current)
                context.model = pyrenko.renko()
                context.is_open = False
                price_diff = current - context.order_price
                context.order_result.append(price_diff)
                context.num_trades += 1
                context.finished = get_order(close_id).dt
                context.close_price = get_order(close_id).limit
                context.closed_by = 'Algo'
                record(num_trades=context.num_trades,
                       order_result=context.order_result)
                query = Position.select(fn.MAX(Position.id))
                p = (Position.update({
                    'finished': context.finished,
                    'closed_price': context.close_price,
                    'closed_by': context.closed_by,
                    'status': 'Closed'
                }).where(Position.id == query.scalar()))
                # NOTE(review): this branch passes `db` to execute() while
                # the stop-loss branch calls execute() with no args —
                # confirm which form is intended.
                p.execute(db)
def _process_data(self, context, data):
    """Called at each algo iteration.

    Calculates indicators, processes signals, and records market and
    external data.

    Arguments:
        context {pandas.Dataframe} -- Catalyst context object
        data {pandas.Datframe} -- Catalyst data object
    """
    # catalyst dumps pickle file after handle_data called
    # so this call uploads the state of
    # the previously compelted iteration
    self.state.i += 1
    self.log.info(f"Processing algo iteration - {self.state.i}")
    if not self.is_backtest and self.state.i > 1:
        outputs.upload_state_to_storage(self)
    else:
        self.log.debug("Skipping stats upload until catalyst writes to file")

    # Log time remaining until the configured strategy end.
    end = arrow.get(self.state.END)
    time_left = end.humanize(only_distance=True)
    self.log.debug(f"Stopping strategy in {time_left}")

    # the following called methods return:
    # True if the iteration should continued
    # False if the algo should not continue
    if not self._set_current_fields(context, data):
        return

    # To check to apply stop-loss, take-profit or keep position
    self.check_open_positions(context)

    # set date first for logging purposes
    self.current_date = get_datetime()

    if not self.fetch_history(context, data):
        return

    # Filter minute frequency
    self._check_minute_freq(context, data)

    # When running inside an RQ worker, stamp the job with the current date.
    if self.in_job:
        job = get_current_job()
        job.meta["date"] = str(self.current_date)
        job.save_meta()

    # Cancel any orders left unfilled from the previous iteration.
    for i in context.blotter.open_orders:
        msg = "Canceling unfilled open order {}".format(i)
        self.log.info(msg)
        self.notify(msg)
        cancel_order(i)

    # NOTE(review): fetch_history is called a second time here (also above)
    # — confirm whether the duplicate fetch is intentional.
    if not self.fetch_history(context, data):
        return

    self._filter_fetched_history(context, data)

    # ## enqueue ml models as soon as data filtered
    if self._ml_models:
        self._enqueue_ml_calcs(context, data)
    else:
        # No ML models: run dataset managers synchronously.
        for dataset, manager in self._datasets.items():
            manager.calculate(context)
            manager.record_data(context)

    # Calculate and record every market indicator; a failure in one
    # indicator is logged and skipped rather than aborting the iteration.
    for i in self._market_indicators:
        try:
            i.calculate(self.state.prices)
            i.record()
        except Exception as e:
            self.log.error(e)
            self.log.error("Error calculating {}, skipping...".format(i.name))

    for i in self._ml_models:
        i.record()

    self._extra_handle(context, data)
    self._count_signals(context, data)

    if context.frame_stats:
        pretty_output = stats_utils.get_pretty_stats(context.frame_stats)
        self.log.debug(pretty_output)
        if not self.is_backtest:
            outputs.save_stats_to_storage(self)

    # Persist updated state into the catalyst context for the next iteration.
    self.state.dump_to_context(context)