def security(self, bin_size):
    """
    Recalculate and obtain different time frame data
    """
    if bin_size not in self.resample_data:
        self.resample_data[bin_size] = resample(self.df_ohlcv, bin_size)
    return self.resample_data[bin_size][:self.data.iloc[-1].name].iloc[-1 * self.ohlcv_len:, :]
def security(self, bin_size):
    """
    Recalculate and obtain different time frame data
    """
    if bin_size not in self.resample_data:
        self.resample_data[bin_size] = resample(self.df_ohlcv, bin_size)
    return self.resample_data[bin_size][:self.data.iloc[-1].name].iloc[-1 * self.ohlcv_len:, :]
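Both security() variants above cache one resampled frame per bin size and then return the trailing ohlcv_len rows up to the current bar. A minimal sketch of that caching pattern, using plain pandas resampling as a stand-in for the project's own resample() helper:

import pandas as pd

def resample_ohlcv(df, rule):
    # Aggregate fine-grained candles (indexed by timestamp) into a larger bin size, e.g. "5T" or "1H".
    return df.resample(rule).agg({
        "open": "first", "high": "max", "low": "min",
        "close": "last", "volume": "sum",
    }).dropna()

_cache = {}

def higher_timeframe(df_1m, rule, ohlcv_len=100):
    # Resample once per rule and reuse the cached frame on later calls,
    # returning only the trailing ohlcv_len rows, mirroring security().
    if rule not in _cache:
        _cache[rule] = resample_ohlcv(df_1m, rule)
    return _cache[rule].iloc[-ohlcv_len:]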
def fetch_ohlcv(self, bin_size, start_time, end_time):
    """
    fetch OHLCV data
    :param start_time: start time
    :param end_time: end time
    :return:
    """
    self.__init_client()
    fetch_bin_size = allowed_range[bin_size][0]
    left_time = start_time
    right_time = end_time
    data = to_data_frame([])

    while True:
        source = retry(lambda: self.public_client.Trade.Trade_getBucketed(
            symbol=self.pair, binSize=fetch_bin_size, startTime=left_time,
            endTime=right_time, count=500, partial=True).result())
        if len(source) == 0:
            break
        source = to_data_frame(source)
        data = pd.concat([data, source])

        if right_time > source.iloc[-1].name + delta(fetch_bin_size):
            left_time = source.iloc[-1].name + delta(fetch_bin_size)
            time.sleep(2)
        else:
            break

    return resample(data, bin_size)
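The paginated fetch_ohlcv() above pulls up to 500 candles per request, moves the left edge just past the last candle it received, and sleeps between requests. A rough standalone sketch of that loop, with a hypothetical fetch_page() standing in for the Trade.Trade_getBucketed call:

import time
from datetime import timedelta

import pandas as pd

def fetch_page(start, end):
    # Hypothetical stand-in for the exchange client call; expected to return a
    # DataFrame of up to 500 candles indexed by timestamp, oldest first.
    raise NotImplementedError

def fetch_range(start, end, step=timedelta(minutes=1)):
    frames = []
    left = start
    while True:
        page = fetch_page(left, end)
        if len(page) == 0:
            break
        frames.append(page)
        last = page.index[-1]
        if end > last + step:
            left = last + step   # advance past the last candle received
            time.sleep(2)        # stay friendly to the exchange rate limit
        else:
            break
    return pd.concat(frames) if frames else pd.DataFrame()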
def __update_ohlcv(self, action, new_data):
    """
    Fetch the data and execute the strategy.
    If there is no data yet, connect to the server and download it first.
    """
    if self.data is None:
        end_time = datetime.now(timezone.utc)
        start_time = end_time - self.ohlcv_len * delta(self.bin_size)
        d1 = self.fetch_ohlcv(self.bin_size, start_time, end_time)
        if len(d1) > 0:
            d2 = self.fetch_ohlcv(allowed_range[self.bin_size][0],
                                  d1.iloc[-1].name + delta(allowed_range[self.bin_size][0]), end_time)
            self.data = pd.concat([d1, d2], sort=True)
        else:
            self.data = d1
    else:
        self.data = pd.concat([self.data, new_data], sort=True)

    # The last row is an incomplete candle, so exclude it. (original)
    # re_sample_data = resample(self.data, self.bin_size)[:-1]
    # Include the last row as well. (by neo)
    re_sample_data = resample(self.data, self.bin_size)[:]

    if self.data.iloc[-1].name == re_sample_data.iloc[-1].name:
        self.data = re_sample_data.iloc[-1 * self.ohlcv_len:, :]

    if self.last_action_time is not None and \
            self.last_action_time == re_sample_data.iloc[-1].name:
        return

    open = re_sample_data['open'].values
    close = re_sample_data['close'].values
    high = re_sample_data['high'].values
    low = re_sample_data['low'].values
    volume = re_sample_data['volume'].values

    try:
        if self.strategy is not None:
            self.strategy(open, close, high, low, volume)
        self.last_action_time = re_sample_data.iloc[-1].name
    except FatalError as e:
        # Fatal error
        logger.error(f"Fatal error. {e}")
        logger.error(traceback.format_exc())

        notify(f"Fatal error occurred. Stopping Bot. {e}")
        notify(traceback.format_exc())
        self.stop()
    except Exception as e:
        logger.error(f"An error occurred. {e}")
        logger.error(traceback.format_exc())

        notify(f"An error occurred. {e}")
        notify(traceback.format_exc())
def __update_ohlcv(self, action, new_data):
    """
    get OHLCV data and execute the strategy
    """
    new_data = new_data.rename(index={new_data.iloc[0].name: new_data.iloc[0].name.ceil(freq="1T")})

    if self.data is None:
        end_time = datetime.now(timezone.utc)
        start_time = end_time - self.ohlcv_len * delta(self.bin_size)
        # logger.info(f"start time fetch ohlcv: {start_time}")
        # logger.info(f"end time fetch ohlcv: {end_time}")
        self.data = self.fetch_ohlcv(self.bin_size, start_time, end_time)

        # The last candle is an incomplete candle with a timestamp in the future
        if self.data.iloc[-1].name > end_time:
            last_candle = self.data.iloc[-1].values
            self.data = self.data[:-1]
            self.data.loc[end_time.replace(microsecond=0)] = last_candle

        logger.info(f"Initial Buffer Fill - Last Candle: {self.data.iloc[-1].name}")
    else:
        # replace the latest candle if the timestamp is the same, otherwise append
        if self.data.iloc[-1].name == new_data.iloc[0].name:
            self.data = pd.concat([self.data[:-1], new_data])
        else:
            self.data = pd.concat([self.data, new_data])

    # exclude current candle data
    re_sample_data = resample(self.data, self.bin_size)[:-1]

    if self.last_action_time is not None and self.last_action_time == re_sample_data.iloc[-1].name:
        return

    # The last candle in the buffer needs to be preserved while resetting the
    # buffer, as it may be incomplete or contain the latest data from the websocket.
    self.data = pd.concat([re_sample_data.iloc[-1 * self.ohlcv_len:, :], self.data.iloc[[-1]]])

    open = re_sample_data["open"].values
    close = re_sample_data["close"].values
    high = re_sample_data["high"].values
    low = re_sample_data["low"].values
    volume = re_sample_data["volume"].values

    try:
        if self.strategy is not None:
            self.timestamp = re_sample_data.iloc[-1].name.isoformat()
            self.strategy(open, close, high, low, volume)
            self.eval_exit()
        self.last_action_time = re_sample_data.iloc[-1].name
    except FatalError as e:
        # Fatal error
        logger.error(f"Fatal error. {e}")
        logger.error(traceback.format_exc())

        notify(f"Fatal error occurred. Stopping Bot. {e}")
        notify(traceback.format_exc())
        self.stop()
    except Exception as e:
        logger.error(f"An error occurred. {e}")
        logger.error(traceback.format_exc())

        notify(f"An error occurred. {e}")
        notify(traceback.format_exc())
def __update_ohlcv(self, action, new_data):
    """
    get OHLCV data and execute the strategy
    """
    if self.data is None:
        end_time = datetime.now(timezone.utc)
        start_time = end_time - self.ohlcv_len * delta(self.bin_size)
        #logger.info(f"start time fetch ohlcv: {start_time}")
        #logger.info(f"end time fetch ohlcv: {end_time}")
        d1 = self.fetch_ohlcv(self.bin_size, start_time, end_time)
        if len(d1) > 0:
            d2 = self.fetch_ohlcv(allowed_range[self.bin_size][0],
                                  d1.iloc[-1].name + delta(allowed_range[self.bin_size][0]), end_time)
            self.data = pd.concat([d1, d2])
        else:
            self.data = d1
    else:
        self.data = pd.concat([self.data, new_data])

    # exclude current candle data
    re_sample_data = resample(self.data, self.bin_size)[:-1]

    if self.data.iloc[-1].name == re_sample_data.iloc[-1].name:
        self.data = re_sample_data.iloc[-1 * self.ohlcv_len:, :]

    if self.last_action_time is not None and \
            self.last_action_time == re_sample_data.iloc[-1].name:
        return

    open = re_sample_data['open'].values
    close = re_sample_data['close'].values
    high = re_sample_data['high'].values
    low = re_sample_data['low'].values
    volume = re_sample_data['volume'].values

    try:
        if self.strategy is not None:
            self.strategy(open, close, high, low, volume)
        self.last_action_time = re_sample_data.iloc[-1].name
    except FatalError as e:
        # Fatal error
        logger.error(f"Fatal error. {e}")
        logger.error(traceback.format_exc())

        notify(f"Fatal error occurred. Stopping Bot. {e}")
        notify(traceback.format_exc())
        self.stop()
    except Exception as e:
        logger.error(f"An error occurred. {e}")
        logger.error(traceback.format_exc())

        notify(f"An error occurred. {e}")
        notify(traceback.format_exc())
def fetch_ohlcv(self, bin_size, start_time, end_time):
    """
    fetch OHLCV data
    :param start_time: start time
    :param end_time: end time
    :return:
    """
    self.__init_client()
    fetch_bin_size = allowed_range[bin_size][0]
    left_time = start_time
    right_time = end_time
    data = to_data_frame([])

    while True:
        if left_time > right_time:
            break

        logger.info(f"fetching OHLCV data")
        left_time_to_timestamp = int(datetime.timestamp(left_time) * 1000)
        right_time_to_timestamp = int(datetime.timestamp(right_time) * 1000)

        source = retry(lambda: self.client.futures_klines(
            symbol=self.pair, interval=fetch_bin_size,
            startTime=left_time_to_timestamp, endTime=right_time_to_timestamp,
            limit=1500))
        if len(source) == 0:
            break

        source_to_object_list = []
        for s in source:
            timestamp_to_datetime = datetime.fromtimestamp(s[6] / 1000).astimezone(UTC)
            source_to_object_list.append({
                "timestamp": timestamp_to_datetime,
                "high": float(s[2]),
                "low": float(s[3]),
                "open": float(s[1]),
                "close": float(s[4]),
                "volume": float(s[5])
            })

        source = to_data_frame(source_to_object_list)
        data = pd.concat([data, source])

        if right_time > source.iloc[-1].name + delta(fetch_bin_size):
            left_time = source.iloc[-1].name + delta(fetch_bin_size)
            time.sleep(2)
        else:
            break

    return resample(data, bin_size)
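The conversion loop above maps each raw futures_klines row into an OHLCV dict keyed by the candle's close time (field 6), with open/high/low/close/volume taken from fields 1-5. A small standalone illustration with one made-up row:

from datetime import datetime, timezone

# One raw kline row in the shape returned by the Binance REST API (sample values):
# [open_time_ms, open, high, low, close, volume, close_time_ms, ...]
row = [1621958400000, "37000.1", "37100.0", "36900.5", "37050.2", "123.4", 1621958459999]

candle = {
    "timestamp": datetime.fromtimestamp(row[6] / 1000, tz=timezone.utc),
    "open": float(row[1]),
    "high": float(row[2]),
    "low": float(row[3]),
    "close": float(row[4]),
    "volume": float(row[5]),
}
print(candle)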
def __update_ohlcv(self, action, new_data):
    """
    Fetch the data and execute the strategy.
    """
    if self.data is None:
        end_time = datetime.now(timezone.utc)
        start_time = end_time - self.ohlcv_len * delta(self.bin_size)
        d1 = self.fetch_ohlcv(self.bin_size, start_time, end_time)
        if len(d1) > 0:
            d2 = self.fetch_ohlcv(allowed_range[self.bin_size][0],
                                  d1.iloc[-1].name + delta(allowed_range[self.bin_size][0]), end_time)
            self.data = pd.concat([d1, d2], sort=True)
        else:
            self.data = d1
    else:
        self.data = pd.concat([self.data, new_data], sort=True)

    # The last row is an incomplete candle, so exclude it
    re_sample_data = resample(self.data, self.bin_size)[:-1]

    if self.data.iloc[-1].name == re_sample_data.iloc[-1].name:
        self.data = re_sample_data.iloc[-1 * self.ohlcv_len:, :]

    if self.last_action_time is not None and \
            self.last_action_time == re_sample_data.iloc[-1].name:
        return

    open = re_sample_data['open'].values
    close = re_sample_data['close'].values
    high = re_sample_data['high'].values
    low = re_sample_data['low'].values
    volume = re_sample_data['volume'].values

    try:
        if self.strategy is not None:
            self.strategy(open, close, high, low, volume)
        self.last_action_time = re_sample_data.iloc[-1].name
    except FatalError as e:
        # Fatal error
        logger.error(f"Fatal error. {e}")
        logger.error(traceback.format_exc())

        notify(f"Fatal error occurred. Stopping Bot. {e}")
        notify(traceback.format_exc())
        self.stop()
    except Exception as e:
        logger.error(f"An error occurred. {e}")
        logger.error(traceback.format_exc())

        notify(f"An error occurred. {e}")
        notify(traceback.format_exc())
def fetch_ohlcv(self, bin_size, start_time, end_time):
    """
    Fetch candlestick (OHLCV) data.
    :param start_time: start time
    :param end_time: end time
    :return:
    """
    self.__init_client()
    fetch_bin_size = allowed_range[bin_size][0]
    data = retry(lambda: self.public_client.Trade.Trade_getBucketed(
        symbol="XBTUSD", binSize=fetch_bin_size, startTime=start_time,
        endTime=end_time, count=500, partial=False).result())
    data_frame = to_data_frame(data)
    return resample(data_frame, bin_size)
def security(self, bin_size):
    """
    Recalculate and obtain different time frame data
    """
    return resample(self.data, bin_size)[:-1]
def security(self, bin_size):
    """
    Recalculate and obtain different time frame data
    """
    return resample(self.data, bin_size)[:-1]
def __crawler_run(self):
    """
    Get the data and execute the strategy.
    """
    self.df_ohlcv = self.df_ohlcv.set_index(self.df_ohlcv.columns[0])
    self.df_ohlcv.index = pd.to_datetime(self.df_ohlcv.index, errors='coerce')

    start = time.time()

    # load and resample warmup data
    self.warmup_len = (allowed_range_minute_granularity[self.warmup_tf][3] * self.ohlcv_len) \
        if self.minute_granularity else self.ohlcv_len

    if self.timeframe_data is None:
        self.timeframe_data = {}
        for t in self.bin_size:
            # if a single timeframe is used without minute_granularity, the data was already resampled after downloading
            self.timeframe_data[t] = resample(self.df_ohlcv, t, minute_granularity=self.minute_granularity) \
                if self.minute_granularity else self.df_ohlcv
            self.timeframe_info[t] = {
                "allowed_range": allowed_range_minute_granularity[t][0] if self.minute_granularity else self.bin_size[0],
                "ohlcv": self.timeframe_data[t][:-1],  # dataframe with closed candles
                "last_action_index": math.ceil(self.warmup_len / allowed_range_minute_granularity[t][3])
                    if self.minute_granularity else self.warmup_len
            }
    #logger.info(f"timeframe info: {self.timeframe_info}")

    for i in range(self.warmup_len):
        self.balance_history.append((self.get_balance() - self.start_balance))  #/100000000*self.get_market_price())
        self.draw_down_history.append(self.max_draw_down_session_perc)

    for i in range(len(self.df_ohlcv) - self.warmup_len):
        self.data = self.df_ohlcv.iloc[i:i + self.warmup_len + 1, :]
        index = self.data.iloc[-1].name
        new_data = self.data.iloc[-1:]

        # action is either the (only) key of the self.timeframe_info dictionary, i.e. a single timeframe string,
        # or "1m" when minute granularity is needed - multiple timeframes or self.minute_granularity == True
        action = "1m" if (self.minute_granularity or len(self.timeframe_info) > 1) else self.bin_size[0]

        timeframes_to_process = []

        for t in self.timeframe_info:
            if self.timeframe_info[t]["allowed_range"] == action:
                # append the minute count of a timeframe when sorting is needed, otherwise just add the timeframe string
                timeframes_to_process.append(allowed_range_minute_granularity[t][3]) if self.timeframes_sorted != None \
                    else timeframes_to_process.append(t)

        # Sorting timeframes that will be updated
        if self.timeframes_sorted == True:
            timeframes_to_process.sort(reverse=True)
        if self.timeframes_sorted == False:
            timeframes_to_process.sort(reverse=False)

        # logger.info(f"timeframes to update: {timeframes_to_update}")

        for t in timeframes_to_process:
            # Find the timeframe string based on its minute count value
            if self.timeframes_sorted != None:
                t = find_timeframe_string(t)

            last_action_index = self.timeframe_info[t]["last_action_index"]

            # Skip this timeframe unless its next candle matches the current row
            if self.timeframe_data[t].iloc[last_action_index].name != new_data.iloc[0].name:
                continue

            tf_ohlcv_data = self.timeframe_data[t].iloc[last_action_index - self.ohlcv_len: last_action_index + 1]

            close = tf_ohlcv_data['close'].values
            open = tf_ohlcv_data['open'].values
            high = tf_ohlcv_data['high'].values
            low = tf_ohlcv_data['low'].values
            volume = tf_ohlcv_data['volume'].values

            if (t == "1m" and self.minute_granularity) or self.minute_granularity != True:
                if self.get_position_size() > 0 and low[-1] > self.get_trail_price():
                    self.set_trail_price(low[-1])
                if self.get_position_size() < 0 and high[-1] < self.get_trail_price():
                    self.set_trail_price(high[-1])

                self.market_price = close[-1]
                self.OHLC = {'open': open, 'high': high, 'low': low, 'close': close}

                self.index = index
                self.balance_history.append((self.get_balance() - self.start_balance))  #/ 100000000 * self.get_market_price())

            #self.eval_sltp()
            self.timestamp = tf_ohlcv_data.iloc[-1].name.isoformat().replace("T", " ")
            self.strategy(t, open, close, high, low, volume)
            self.timeframe_info[t]['last_action_index'] += 1

            #self.balance_history.append((self.get_balance() - self.start_balance))  #/ 100000000 * self.get_market_price())
            #self.eval_exit()
            #self.eval_sltp()

    self.close_all()
    logger.info(f"Back test time : {time.time() - start}")
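__crawler_run() hands each processed timeframe to the user strategy as (timeframe, open, close, high, low, volume), with the arrays ordered oldest to newest. A hypothetical strategy callback matching that call shape (the moving-average logic and the placeholder order comments are illustrative only, not the project's API):

import numpy as np

def strategy(self, timeframe, open, close, high, low, volume):
    # Hypothetical example: act only on the 15-minute series and compare a
    # fast and a slow simple moving average over the closed candles.
    if timeframe != "15m" or len(close) < 20:
        return
    fast = np.mean(close[-5:])
    slow = np.mean(close[-20:])
    if fast > slow:
        pass  # e.g. open or keep a long position here
    elif fast < slow:
        pass  # e.g. open or keep a short position here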
def security(self, bin_size):
    """
    Recalculate and obtain different time frame data
    """
    return resample(self.data, bin_size)[:-1]
def __update_ohlcv(self, action, new_data):
    """
    get and update OHLCV data and execute the strategy
    """
    # Binance can output weird timestamps - e.g. 2021-05-25 16:04:59.999000+00:00.
    # Round the timestamp up to the next full minute for further processing.
    new_data = new_data.rename(index={new_data.iloc[0].name: new_data.iloc[0].name.ceil(freq='1T')})

    if self.timeframe_data is None:
        self.timeframe_data = {}
        for t in self.bin_size:
            bin_size = t
            end_time = datetime.now(timezone.utc)
            start_time = end_time - self.ohlcv_len * delta(bin_size)
            self.timeframe_data[bin_size] = self.fetch_ohlcv(bin_size, start_time, end_time)
            self.timeframe_info[bin_size] = {
                "allowed_range": allowed_range_minute_granularity[t][0] if self.minute_granularity else allowed_range[t][0],
                "ohlcv": self.timeframe_data[t][:-1],  # dataframe with closed candles
                "last_action_time": None,  # self.timeframe_data[bin_size].iloc[-1].name,  # last strategy execution time
                "last_candle": self.timeframe_data[bin_size].iloc[-2].values,  # store last complete candle
                "partial_candle": self.timeframe_data[bin_size].iloc[-1].values  # store incomplete candle
            }

            # The last candle is an incomplete candle with a timestamp in the future
            if self.timeframe_data[bin_size].iloc[-1].name > end_time:
                last_candle = self.timeframe_data[t].iloc[-1].values           # store last candle
                self.timeframe_data[bin_size] = self.timeframe_data[t][:-1]    # exclude last candle
                self.timeframe_data[bin_size].loc[end_time.replace(microsecond=0)] = last_candle  # set last candle to end_time

            logger.info(f"Initial Buffer Fill - Last Candle: {self.timeframe_data[bin_size].iloc[-1].name}")
    #logger.info(f"{self.timeframe_data}")

    timeframes_to_update = []

    for t in self.timeframe_info:
        if self.timeframe_info[t]["allowed_range"] == action:
            # append the minute count of a timeframe when sorting is needed, otherwise just add the timeframe string
            timeframes_to_update.append(allowed_range_minute_granularity[t][3]) if self.timeframes_sorted != None \
                else timeframes_to_update.append(t)

    # Sorting timeframes that will be updated
    if self.timeframes_sorted == True:
        timeframes_to_update.sort(reverse=True)
    if self.timeframes_sorted == False:
        timeframes_to_update.sort(reverse=False)

    #logger.info(f"timeframes to update: {timeframes_to_update}")

    for t in timeframes_to_update:
        # Find the timeframe string based on its minute count value
        if self.timeframes_sorted != None:
            t = find_timeframe_string(t)

        # replace the latest candle if the timestamp is the same, otherwise append
        if self.timeframe_data[t].iloc[-1].name == new_data.iloc[0].name:
            self.timeframe_data[t] = pd.concat([self.timeframe_data[t][:-1], new_data])
        else:
            self.timeframe_data[t] = pd.concat([self.timeframe_data[t], new_data])

        # exclude current candle data and store partial candle data
        re_sample_data = resample(self.timeframe_data[t], t,
                                  minute_granularity=True if self.minute_granularity else False)
        self.timeframe_info[t]['partial_candle'] = re_sample_data.iloc[-1].values  # store partial candle data
        re_sample_data = re_sample_data[:-1]  # exclude current candle data

        #logger.info(f"{self.timeframe_info[t]['last_action_time']} : {self.timeframe_data[t].iloc[-1].name} : {re_sample_data.iloc[-1].name}")

        if self.timeframe_info[t]["last_action_time"] is None:
            self.timeframe_info[t]["last_action_time"] = re_sample_data.iloc[-1].name

        if self.timeframe_info[t]["last_action_time"] == re_sample_data.iloc[-1].name:
            continue

        # The last candle in the buffer needs to be preserved while resetting the
        # buffer, as it may be incomplete or contain the latest data from the websocket.
        self.timeframe_data[t] = pd.concat([re_sample_data.iloc[-1 * self.ohlcv_len:, :],
                                            self.timeframe_data[t].iloc[[-1]]])

        # store the OHLCV dataframe in the timeframe_info dictionary
        self.timeframe_info[t]["ohlcv"] = re_sample_data
        #logger.info(f"Buffer Right Edge: {self.data.iloc[-1]}")

        open = re_sample_data['open'].values
        close = re_sample_data['close'].values
        high = re_sample_data['high'].values
        low = re_sample_data['low'].values
        volume = re_sample_data['volume'].values

        try:
            if self.strategy is not None:
                self.timestamp = re_sample_data.iloc[-1].name.isoformat()
                self.strategy(t, open, close, high, low, volume)
            self.timeframe_info[t]['last_action_time'] = re_sample_data.iloc[-1].name
        except FatalError as e:
            # Fatal error
            logger.error(f"Fatal error. {e}")
            logger.error(traceback.format_exc())

            notify(f"Fatal error occurred. Stopping Bot. {e}")
            notify(traceback.format_exc())
            self.stop()
        except Exception as e:
            logger.error(f"An error occurred. {e}")
            logger.error(traceback.format_exc())

            notify(f"An error occurred. {e}")
            notify(traceback.format_exc())
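When timeframes_sorted is not None, the update order above is decided by each timeframe's minute count and then mapped back to its string name with find_timeframe_string(). A small sketch of that round trip, using an assumed minute-count table and a hypothetical lookup helper:

# Assumed minute counts per timeframe string (illustrative subset).
MINUTES = {"1m": 1, "5m": 5, "15m": 15, "1h": 60, "4h": 240}

def find_tf_string(minutes):
    # Hypothetical counterpart of find_timeframe_string().
    return next(tf for tf, m in MINUTES.items() if m == minutes)

timeframes = ["15m", "1h", "5m"]

# timeframes_sorted == True: sort descending so the largest timeframe updates first.
ordered = sorted((MINUTES[t] for t in timeframes), reverse=True)
print([find_tf_string(m) for m in ordered])   # ['1h', '15m', '5m']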
def __update_ohlcv(self, action, new_data):
    """
    get OHLCV data and execute the strategy
    """
    # Binance can output weird timestamps - e.g. 2021-05-25 16:04:59.999000+00:00.
    # Round the timestamp up to the next full minute for further processing.
    new_data = new_data.rename(index={new_data.iloc[0].name: new_data.iloc[0].name.ceil(freq='1T')})

    if self.data is None:
        end_time = datetime.now(timezone.utc)
        start_time = end_time - self.ohlcv_len * delta(self.bin_size)
        #logger.info(f"start time fetch ohlcv: {start_time}")
        #logger.info(f"end time fetch ohlcv: {end_time}")
        self.data = self.fetch_ohlcv(self.bin_size, start_time, end_time)

        # The last candle is an incomplete candle with a timestamp in the future
        if self.data.iloc[-1].name > end_time:
            last_candle = self.data.iloc[-1].values    # store last candle
            self.data = self.data[:-1]                 # exclude last candle
            self.data.loc[end_time.replace(microsecond=0)] = last_candle  # set last candle to end_time

        logger.info(f"Initial Buffer Fill - Last Candle: {self.data.iloc[-1].name}")
    else:
        # replace the latest candle if the timestamp is the same, otherwise append
        if self.data.iloc[-1].name == new_data.iloc[0].name:
            self.data = pd.concat([self.data[:-1], new_data])
        else:
            self.data = pd.concat([self.data, new_data])

    # exclude current candle data
    re_sample_data = resample(self.data, self.bin_size)[:-1]
    # logger.info(f"{self.last_action_time} : {self.data.iloc[-1].name} : {re_sample_data.iloc[-1].name}")

    if self.last_action_time is not None and \
            self.last_action_time == re_sample_data.iloc[-1].name:
        return

    # The last candle in the buffer needs to be preserved while resetting the
    # buffer, as it may be incomplete or contain the latest data from the websocket.
    self.data = pd.concat([re_sample_data.iloc[-1 * self.ohlcv_len:, :], self.data.iloc[[-1]]])
    #logger.info(f"Buffer Right Edge: {self.data.iloc[-1]}")

    open = re_sample_data['open'].values
    close = re_sample_data['close'].values
    high = re_sample_data['high'].values
    low = re_sample_data['low'].values
    volume = re_sample_data['volume'].values

    try:
        if self.strategy is not None:
            self.timestamp = re_sample_data.iloc[-1].name.isoformat()
            self.strategy(open, close, high, low, volume)
        self.last_action_time = re_sample_data.iloc[-1].name
    except FatalError as e:
        # Fatal error
        logger.error(f"Fatal error. {e}")
        logger.error(traceback.format_exc())

        notify(f"Fatal error occurred. Stopping Bot. {e}")
        notify(traceback.format_exc())
        self.stop()
    except Exception as e:
        logger.error(f"An error occurred. {e}")
        logger.error(traceback.format_exc())

        notify(f"An error occurred. {e}")
        notify(traceback.format_exc())
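Both live __update_ohlcv() variants above first push the incoming candle's index up to the next whole minute, because Binance can stamp klines like 16:04:59.999. The normalization itself is plain pandas Timestamp.ceil:

import pandas as pd

ts = pd.Timestamp("2021-05-25 16:04:59.999000+00:00")
print(ts.ceil(freq="1T"))   # 2021-05-25 16:05:00+00:00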