def error(msg: str) -> None:
    if jh.app_mode() not in LOGGERS:
        _init_main_logger()

    # error logs should be logged as info logs as well
    info(msg)

    msg = str(msg)
    from jesse.store import store

    log_id = jh.generate_unique_id()
    log_dict = {
        'id': log_id,
        'timestamp': jh.now_to_timestamp(),
        'message': msg
    }

    if jh.is_live() and jh.get_config('env.notifications.events.errors', True):
        # notify_urgently(f"ERROR at \"{jh.get_config('env.identifier')}\" account:\n{msg}")
        notify_urgently(f"ERROR:\n{msg}")
        notify(f'ERROR:\n{msg}')

    if (jh.is_backtesting() and jh.is_debugging()) or jh.is_collecting_data() or jh.is_live():
        sync_publish('error_log', log_dict)

    store.logs.errors.append(log_dict)

    if jh.is_live() or jh.is_optimizing():
        msg = f"[ERROR | {jh.timestamp_to_time(jh.now_to_timestamp())[:19]}] {msg}"
        logger = LOGGERS[jh.app_mode()]
        logger.error(msg)

    if jh.is_live():
        from jesse.models.utils import store_log_into_db
        store_log_into_db(log_dict, 'error')

def info(msg: str, send_notification=False) -> None:
    if jh.app_mode() not in LOGGERS:
        _init_main_logger()

    msg = str(msg)
    from jesse.store import store

    log_id = jh.generate_unique_id()
    log_dict = {
        'id': log_id,
        'timestamp': jh.now_to_timestamp(),
        'message': msg
    }

    store.logs.info.append(log_dict)

    if jh.is_collecting_data() or jh.is_live():
        sync_publish('info_log', log_dict)

    if jh.is_live() or (jh.is_backtesting() and jh.is_debugging()):
        msg = f"[INFO | {jh.timestamp_to_time(jh.now_to_timestamp())[:19]}] {msg}"
        logger = LOGGERS[jh.app_mode()]
        logger.info(msg)

    if jh.is_live():
        from jesse.models.utils import store_log_into_db
        store_log_into_db(log_dict, 'info')

    if send_notification:
        notify(msg)

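# Illustrative sketch only: the helpers below are hypothetical (not part of Jesse)
# and use just the standard library. They show the pattern info()/error() above
# rely on: every log entry is a dict with a unique id, a millisecond timestamp,
# and the message, and that same dict is appended to the in-memory store,
# published, and (in live mode) persisted to the database.
import time
import uuid
from datetime import datetime, timezone


def make_log_dict(message: str) -> dict:
    # hypothetical stand-in for the {'id', 'timestamp', 'message'} dict built above
    return {
        'id': str(uuid.uuid4()),
        'timestamp': int(time.time() * 1000),
        'message': str(message),
    }


def format_log_line(level: str, entry: dict) -> str:
    # analogous to the "[INFO | <timestamp>] <msg>" prefix built above
    ts = datetime.fromtimestamp(entry['timestamp'] / 1000, tz=timezone.utc)
    return f"[{level} | {ts.strftime('%Y-%m-%d %H:%M:%S')}] {entry['message']}"
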
def _check(self) -> None:
    """Based on the newly updated info, check if we should take action or not"""
    if not self._is_initiated:
        self._is_initiated = True

    if jh.is_live() and jh.is_debugging():
        logger.info(f'Executing {self.name}-{self.exchange}-{self.symbol}-{self.timeframe}')

    # safety check: make sure test-driving on live trading won't bleed your account
    if jh.is_test_driving() and store.completed_trades.count >= 2:
        logger.info('Maximum allowed trades in test-drive mode is reached')
        return

    if self._open_position_orders != [] and self.is_close and self.should_cancel():
        self._execute_cancel()

        # make sure order cancellation response is received via WS
        if jh.is_live():
            # sleep a little until cancel is received via WS
            sleep(0.1)
            # just in case, sleep some more if necessary
            for _ in range(20):
                if store.orders.count_active_orders(self.exchange, self.symbol) == 0:
                    break

                logger.info('sleeping 0.2 more seconds...')
                sleep(0.2)

            # if it's still not cancelled, something is wrong. Handle cancellation failure
            if store.orders.count_active_orders(self.exchange, self.symbol) != 0:
                raise exceptions.ExchangeNotResponding(
                    'The exchange did not respond as expected'
                )

    if self.position.is_open:
        self._update_position()

    if jh.is_backtesting() or jh.is_unit_testing():
        store.orders.execute_pending_market_orders()

    if self.position.is_close and self._open_position_orders == []:
        should_short = self.should_short()
        should_long = self.should_long()

        # validation
        if should_short and should_long:
            raise exceptions.ConflictingRules(
                'should_short and should_long should not be true at the same time.'
            )

        if should_long:
            self._execute_long()
        elif should_short:
            self._execute_short()

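# Hedged sketch of the user-facing hooks that _check() drives. The method names
# should_long(), should_short(), and should_cancel() come from the code above;
# the class itself is a made-up stand-in, not Jesse's real Strategy base class.
class ExampleStrategyHooks:
    def should_long(self) -> bool:
        # evaluated on candle close while the position is closed and no entry orders are pending
        return False

    def should_short(self) -> bool:
        # must never be True together with should_long(), otherwise
        # _check() raises exceptions.ConflictingRules
        return False

    def should_cancel(self) -> bool:
        # consulted while entry orders are still open; returning True triggers _execute_cancel()
        return False
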
def info(msg: str) -> None:
    from jesse.store import store

    store.logs.info.append({'time': jh.now_to_timestamp(), 'message': msg})

    if (jh.is_backtesting() and jh.is_debugging()) or jh.is_collecting_data():
        print(f'[{jh.timestamp_to_time(jh.now_to_timestamp())}]: {msg}')

    if jh.is_live():
        msg = f"[INFO | {jh.timestamp_to_time(jh.now_to_timestamp())[:19]}] {str(msg)}"
        logging.info(msg)

def info(msg):
    from jesse.store import store

    store.logs.info.append({'time': jh.now(), 'message': msg})

    if (jh.is_backtesting() and jh.is_debugging()) or jh.is_collecting_data():
        print(jh.color('[{}]: {}'.format(jh.timestamp_to_time(jh.now()), msg), 'magenta'))

    if jh.is_live():
        msg = '[INFO | {}] '.format(jh.timestamp_to_time(jh.now())[:19]) + str(msg)
        import logging
        logging.info(msg)

def error(msg):
    from jesse.store import store

    if jh.is_live() and jh.get_config('env.notifications.events.errors', True):
        notify('ERROR:\n{}'.format(msg))

    if (jh.is_backtesting() and jh.is_debugging()) or jh.is_collecting_data():
        print(jh.color('[{}]: {}'.format(jh.timestamp_to_time(jh.now()), msg), 'red'))

    store.logs.errors.append({'time': jh.now(), 'message': msg})

    if jh.is_live():
        msg = '[ERROR | {}] '.format(jh.timestamp_to_time(jh.now())[:19]) + str(msg)
        import logging
        logging.error(msg)

def error(msg: str) -> None:
    msg = str(msg)
    from jesse.store import store

    if jh.is_live() and jh.get_config('env.notifications.events.errors', True):
        notify_urgently(f"ERROR at \"{jh.get_config('env.identifier')}\" account:\n{msg}")
        notify(f'ERROR:\n{msg}')

    if (jh.is_backtesting() and jh.is_debugging()) or jh.is_collecting_data():
        print(jh.color(f'[{jh.timestamp_to_time(jh.now_to_timestamp())}]: {msg}', 'red'))

    store.logs.errors.append({'time': jh.now_to_timestamp(), 'message': msg})

    if jh.is_live() or jh.is_optimizing():
        msg = f"[ERROR | {jh.timestamp_to_time(jh.now_to_timestamp())[:19]}] {msg}"
        logging.error(msg)

def set_config(conf: dict) -> None:
    global config

    # optimization mode only
    if jh.is_optimizing():
        # ratio
        config['env']['optimization']['ratio'] = conf['ratio']
        # exchange info (only one because the optimize mode supports only one trading route at the moment)
        config['env']['optimization']['exchange'] = conf['exchange']
        # warm_up_candles
        config['env']['optimization']['warmup_candles_num'] = int(conf['warm_up_candles'])

    # backtest and live
    if jh.is_backtesting() or jh.is_live():
        # warm_up_candles
        config['env']['data']['warmup_candles_num'] = int(conf['warm_up_candles'])
        # logs
        config['env']['logging'] = conf['logging']
        # exchanges
        for key, e in conf['exchanges'].items():
            config['env']['exchanges'][e['name']] = {
                'fee': float(e['fee']),
                'type': 'futures',  # used only in futures trading
                # 'settlement_currency': 'USDT',
                'settlement_currency': jh.get_settlement_currency_from_exchange(e['name']),
                # accepted values are: 'cross' and 'isolated'
                'futures_leverage_mode': e['futures_leverage_mode'],
                # 1x, 2x, 10x, 50x, etc. Enter as integers
                'futures_leverage': int(e['futures_leverage']),
                'assets': [
                    {'asset': 'USDT', 'balance': float(e['balance'])},
                ],
            }

    # live mode only
    if jh.is_live():
        config['env']['notifications'] = conf['notifications']
        # TODO: must become a config value later when we go after multi account support?
        config['env']['identifier'] = 'main'

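# Example payload for set_config() in backtest mode, assembled only from the keys
# the function above actually reads; every value is illustrative, not a recommendation.
example_backtest_conf = {
    'warm_up_candles': 210,
    'logging': {},  # forwarded verbatim to config['env']['logging']
    'exchanges': {
        'Binance Perpetual Futures': {
            'name': 'Binance Perpetual Futures',
            'fee': 0.0004,
            'futures_leverage_mode': 'cross',  # or 'isolated'
            'futures_leverage': 2,
            'balance': 10_000,
        },
    },
    # additionally read when jh.is_optimizing(): 'ratio', 'exchange'
    # additionally read when jh.is_live(): 'notifications'
}
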
def _init_main_logger():
    session_id = jh.get_session_id()
    jh.make_directory('storage/logs/live-mode')
    jh.make_directory('storage/logs/backtest-mode')
    jh.make_directory('storage/logs/optimize-mode')
    jh.make_directory('storage/logs/collect-mode')

    if jh.is_live():
        filename = f'storage/logs/live-mode/{session_id}.txt'
    elif jh.is_collecting_data():
        filename = f'storage/logs/collect-mode/{session_id}.txt'
    elif jh.is_optimizing():
        filename = f'storage/logs/optimize-mode/{session_id}.txt'
    elif jh.is_backtesting():
        filename = f'storage/logs/backtest-mode/{session_id}.txt'
    else:
        filename = 'storage/logs/etc.txt'

    new_logger = logging.getLogger(jh.app_mode())
    new_logger.setLevel(logging.INFO)
    new_logger.addHandler(logging.FileHandler(filename, mode='w'))
    LOGGERS[jh.app_mode()] = new_logger

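# Hedged note on the pattern above: logging.getLogger(name) returns the same Logger
# instance for a given name, so keying LOGGERS by jh.app_mode() gives info()/error()
# one per-mode logger to reuse. The `if jh.app_mode() not in LOGGERS` guard in those
# functions ensures the FileHandler is attached only once. Stdlib-only illustration:
import logging

a = logging.getLogger('backtest')
b = logging.getLogger('backtest')
assert a is b  # the same underlying Logger object is returned for the same name
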
def load_candles(start_date_str: str, finish_date_str: str) -> Dict[str, Dict[str, Union[str, np.ndarray]]]:
    start_date = jh.date_to_timestamp(start_date_str)
    finish_date = jh.date_to_timestamp(finish_date_str) - 60000

    # validate
    if start_date == finish_date:
        raise ValueError('start_date and finish_date cannot be the same.')
    if start_date > finish_date:
        raise ValueError('start_date cannot be bigger than finish_date.')
    if finish_date > arrow.utcnow().int_timestamp * 1000:
        raise ValueError("Can't load candle data from the future!")

    # load and add required warm-up candles for backtest
    if jh.is_backtesting():
        for c in config['app']['considering_candles']:
            required_candles.inject_required_candles_to_store(
                required_candles.load_required_candles(c[0], c[1], start_date_str, finish_date_str),
                c[0],
                c[1]
            )

    # download candles for the duration of the backtest
    candles = {}
    for c in config['app']['considering_candles']:
        exchange, symbol = c[0], c[1]
        key = jh.key(exchange, symbol)
        cache_key = f'{start_date_str}-{finish_date_str}-{key}'
        cached_value = cache.get_value(cache_key)

        # if cache exists
        if cached_value:
            candles_tuple = cached_value
        # not cached; fetch and cache for later calls
        else:
            # fetch from database
            candles_tuple = Candle.select(
                Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low, Candle.volume
            ).where(
                Candle.timestamp.between(start_date, finish_date),
                Candle.exchange == exchange,
                Candle.symbol == symbol
            ).order_by(Candle.timestamp.asc()).tuples()

            # validate that there are enough candles for the selected period
            required_candles_count = (finish_date - start_date) / 60_000
            if len(candles_tuple) == 0 or candles_tuple[-1][0] != finish_date or candles_tuple[0][0] != start_date:
                raise exceptions.CandleNotFoundInDatabase(
                    f'Not enough candles for {symbol}. Try running "jesse import-candles"'
                )
            elif len(candles_tuple) != required_candles_count + 1:
                raise exceptions.CandleNotFoundInDatabase(
                    f'There are missing candles between {start_date_str} => {finish_date_str}'
                )

            # cache it for near-future calls
            cache.set_value(cache_key, tuple(candles_tuple), expire_seconds=60 * 60 * 24 * 7)

        candles[key] = {
            'exchange': exchange,
            'symbol': symbol,
            'candles': np.array(candles_tuple)
        }

    return candles

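# Hedged usage sketch for load_candles(): assumes a matching route exists in
# config['app']['considering_candles'] and its candles have already been imported
# into the database; the exchange, symbol, and dates below are only examples.
candles = load_candles('2021-01-01', '2021-06-01')
btc = candles[jh.key('Binance', 'BTC-USDT')]
print(btc['exchange'], btc['symbol'])
print(btc['candles'].shape)  # (N, 6) array: timestamp, open, close, high, low, volume
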
def test_is_backtesting():
    assert jh.is_backtesting() is True