def test_log_debug_messages_to_console(self):
    # Arrange
    logger = TestLogger(clock=TestClock(), level_console=LogLevel.DEBUG)
    logger_adapter = LoggerAdapter("TEST_LOGGER", logger)

    # Act
    logger_adapter.debug("This is a log message.")

    # Assert
    self.assertTrue(True)  # No exception raised
def test_log_debug_messages_to_console(self):
    # Arrange
    logger = Logger(clock=TestClock(), level_stdout=LogLevel.DEBUG)
    logger_adapter = LoggerAdapter(component="TEST_LOGGER", logger=logger)

    # Act
    logger_adapter.debug("This is a log message.")

    # Assert
    assert True  # No exceptions raised
def test_can_log_debug_messages_to_console(self):
    # Arrange
    logger = TestLogger(level_console=LogLevel.DEBUG)
    logger_adapter = LoggerAdapter('TEST_LOGGER', logger)

    # Act
    logger_adapter.debug("This is a log message.")

    # Assert
    self.assertTrue(True)  # Does not raise errors.
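# A minimal sketch of a stronger assertion for the smoke tests above, assuming
# pytest and that this logger writes synchronously to stdout (as the
# `level_stdout`/`level_console` parameters suggest). The `capsys` fixture is
# standard pytest; everything else mirrors the second variant above. This is
# illustrative only: the vacuous `assert True` is replaced with a check on the
# emitted output.
def test_log_debug_messages_to_console(capsys):
    # Arrange
    logger = Logger(clock=TestClock(), level_stdout=LogLevel.DEBUG)
    logger_adapter = LoggerAdapter(component="TEST_LOGGER", logger=logger)

    # Act
    logger_adapter.debug("This is a log message.")

    # Assert
    out, _ = capsys.readouterr()
    assert "This is a log message." in out  # Assumes synchronous stdout logging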
class TradingNode:
    """
    Provides an asynchronous network node for live trading.
    """

    def __init__(
        self,
        strategies: List[TradingStrategy],
        config: Dict[str, object],
    ):
        """
        Initialize a new instance of the TradingNode class.

        Parameters
        ----------
        strategies : list[TradingStrategy]
            The list of strategies to run on the trading node.
        config : dict[str, object]
            The configuration for the trading node.

        Raises
        ------
        ValueError
            If strategies is None or empty.
        ValueError
            If config is None or empty.

        """
        PyCondition.not_none(strategies, "strategies")
        PyCondition.not_none(config, "config")
        PyCondition.not_empty(strategies, "strategies")
        PyCondition.not_empty(config, "config")

        # Extract configs
        config_trader = config.get("trader", {})
        config_log = config.get("logging", {})
        config_exec_db = config.get("exec_database", {})
        config_strategy = config.get("strategy", {})
        config_adapters = config.get("adapters", {})

        self._uuid_factory = UUIDFactory()
        self._loop = asyncio.get_event_loop()
        self._executor = concurrent.futures.ThreadPoolExecutor()
        self._loop.set_default_executor(self._executor)
        self._clock = LiveClock(loop=self._loop)

        self.created_time = self._clock.utc_now()
        self._is_running = False

        # Uncomment for debugging
        # self._loop.set_debug(True)

        # Setup identifiers
        self.trader_id = TraderId(
            name=config_trader["name"],
            tag=config_trader["id_tag"],
        )

        # Setup logging
        self._logger = LiveLogger(
            clock=self._clock,
            name=self.trader_id.value,
            level_console=LogLevelParser.from_str_py(config_log.get("log_level_console")),
            level_file=LogLevelParser.from_str_py(config_log.get("log_level_file")),
            level_store=LogLevelParser.from_str_py(config_log.get("log_level_store")),
            run_in_process=config_log.get("run_in_process", True),  # Run logger in a separate process
            log_thread=config_log.get("log_thread_id", False),
            log_to_file=config_log.get("log_to_file", False),
            log_file_path=config_log.get("log_file_path", ""),
        )

        self._log = LoggerAdapter(component_name=self.__class__.__name__, logger=self._logger)
        self._log_header()
        self._log.info("Building...")

        self._setup_loop()  # Requires the logger to be initialized

        self.portfolio = Portfolio(
            clock=self._clock,
            logger=self._logger,
        )

        self._data_engine = LiveDataEngine(
            loop=self._loop,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=self._logger,
            config={"qsize": 10000},
        )

        self.portfolio.register_cache(self._data_engine.cache)
        self.analyzer = PerformanceAnalyzer()

        if config_exec_db["type"] == "redis":
            exec_db = RedisExecutionDatabase(
                trader_id=self.trader_id,
                logger=self._logger,
                command_serializer=MsgPackCommandSerializer(),
                event_serializer=MsgPackEventSerializer(),
                config={
                    "host": config_exec_db["host"],
                    "port": config_exec_db["port"],
                },
            )
        else:
            exec_db = BypassExecutionDatabase(
                trader_id=self.trader_id,
                logger=self._logger,
            )

        self._exec_engine = LiveExecutionEngine(
            loop=self._loop,
            database=exec_db,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=self._logger,
            config={"qsize": 10000},
        )

        self._exec_engine.load_cache()
        self._setup_adapters(config_adapters, self._logger)

        self.trader = Trader(
            trader_id=self.trader_id,
            strategies=strategies,
            portfolio=self.portfolio,
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            clock=self._clock,
            logger=self._logger,
        )

        self._check_residuals_delay = config_trader.get("check_residuals_delay", 5.0)
        self._load_strategy_state = config_strategy.get("load_state", True)
        self._save_strategy_state = config_strategy.get("save_state", True)

        if self._load_strategy_state:
            self.trader.load()

        self._log.info("state=INITIALIZED.")
        self.time_to_initialize = self._clock.delta(self.created_time)
        self._log.info(f"Initialized in {self.time_to_initialize.total_seconds():.3f}s.")

    @property
    def is_running(self) -> bool:
        """
        If the trading node is running.

        Returns
        -------
        bool
            True if running, else False.

        """
        return self._is_running

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        """
        Return the event loop of the trading node.

        Returns
        -------
        asyncio.AbstractEventLoop

        """
        return self._loop

    def get_logger(self) -> LiveLogger:
        """
        Return the logger for the trading node.

        Returns
        -------
        LiveLogger

        """
        return self._logger

    def start(self) -> None:
        """
        Start the trading node.
        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._run())
            else:
                self._loop.run_until_complete(self._run())
        except RuntimeError as ex:
            self._log.exception(ex)

    def stop(self) -> None:
        """
        Stop the trading node gracefully.

        After a specified delay the internal `Trader` residuals will be checked.
        If strategy state saving is configured, strategy states will then be saved.

        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._stop())
            else:
                self._loop.run_until_complete(self._stop())
        except RuntimeError as ex:
            self._log.exception(ex)

    def dispose(self) -> None:
        """
        Dispose of the trading node.

        Gracefully shuts down the executor and event loop.

        """
        try:
            timeout = self._clock.utc_now() + timedelta(seconds=5)
            while self._is_running:
                time.sleep(0.1)
                if self._clock.utc_now() >= timeout:
                    self._log.warning("Timed out (5s) waiting for node to stop.")
                    break

            self._log.info("state=DISPOSING...")

            self._log.debug(f"{self._data_engine.get_run_queue_task()}")
            self._log.debug(f"{self._exec_engine.get_run_queue_task()}")

            self.trader.dispose()
            self._data_engine.dispose()
            self._exec_engine.dispose()

            self._log.info("Shutting down executor...")
            if sys.version_info >= (3, 9):
                # cancel_futures added in Python 3.9
                self._executor.shutdown(wait=True, cancel_futures=True)
            else:
                self._executor.shutdown(wait=True)

            self._log.info("Stopping event loop...")
            self._loop.stop()
            self._cancel_all_tasks()
        except RuntimeError as ex:
            self._log.error("Shutdown coro issues will be fixed soon...")  # TODO: Remove when fixed
            self._log.exception(ex)
        finally:
            if self._loop.is_running():
                self._log.warning("Cannot close a running event loop.")
            else:
                self._log.info("Closing event loop...")
                self._loop.close()

            # Check and log if event loop is running
            if self._loop.is_running():
                self._log.warning(f"loop.is_running={self._loop.is_running()}")
            else:
                self._log.info(f"loop.is_running={self._loop.is_running()}")

            # Check and log if event loop is closed
            if not self._loop.is_closed():
                self._log.warning(f"loop.is_closed={self._loop.is_closed()}")
            else:
                self._log.info(f"loop.is_closed={self._loop.is_closed()}")

            self._log.info("state=DISPOSED.")
            self._logger.stop()  # Ensure process is stopped
            time.sleep(0.1)  # Ensure final log messages

    def _log_header(self) -> None:
        nautilus_header(self._log)
        self._log.info(f"redis {redis.__version__}")
        self._log.info(f"msgpack {msgpack.version[0]}.{msgpack.version[1]}.{msgpack.version[2]}")
        if uvloop_version:
            self._log.info(f"uvloop {uvloop_version}")
        self._log.info("=================================================================")

    def _setup_loop(self) -> None:
        if self._loop.is_closed():
            self._log.error("Cannot setup signal handling (event loop was closed).")
            return

        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signals = (signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGABRT)
        for sig in signals:
            self._loop.add_signal_handler(sig, self._loop_sig_handler, sig)
        self._log.debug(f"Event loop {signals} handling setup.")

    def _loop_sig_handler(self, sig: signal.Signals) -> None:
        self._loop.remove_signal_handler(signal.SIGTERM)
        self._loop.add_signal_handler(signal.SIGINT, lambda: None)
        self._log.warning(f"Received {sig!s}, shutting down...")
        self.stop()

    def _setup_adapters(self, config: Dict[str, object], logger: LiveLogger) -> None:
        # Setup each data client
        for name, client_config in config.items():
            if name.startswith("ccxt-"):
                try:
                    import ccxtpro  # TODO: Find a better way of doing this
                except ImportError:
                    raise ImportError(
                        "ccxtpro is not installed, "
                        "installation instructions can be found at https://ccxt.pro",
                    )

                client_cls = getattr(ccxtpro, name.partition('-')[2].lower())
                data_client, exec_client = CCXTClientsFactory.create(
                    client_cls=client_cls,
                    config=client_config,
                    data_engine=self._data_engine,
                    exec_engine=self._exec_engine,
                    clock=self._clock,
                    logger=logger,
                )
            elif name == "oanda":
                data_client = OandaDataClientFactory.create(
                    config=client_config,
                    data_engine=self._data_engine,
                    clock=self._clock,
                    logger=logger,
                )
                exec_client = None  # TODO: Implement
            else:
                self._log.error(f"No adapter available for `{name}`.")
                continue

            if data_client is not None:
                self._data_engine.register_client(data_client)

            if exec_client is not None:
                self._exec_engine.register_client(exec_client)

    async def _run(self) -> None:
        try:
            self._log.info("state=STARTING...")
            self._is_running = True

            self._data_engine.start()
            self._exec_engine.start()

            result: bool = await self._await_engines_connected()
            if not result:
                return

            result: bool = await self._exec_engine.resolve_state()
            if not result:
                return

            self.trader.start()

            if self._loop.is_running():
                self._log.info("state=RUNNING.")
            else:
                self._log.warning("Event loop is not running.")

            # Continue to run while engines are running...
            await self._data_engine.get_run_queue_task()
            await self._exec_engine.get_run_queue_task()
        except asyncio.CancelledError as ex:
            self._log.error(str(ex))

    async def _await_engines_connected(self) -> bool:
        self._log.info("Waiting for engines to initialize...")

        # The data engine clients will be set as connected when all
        # instruments are received and updated with the data engine.
        # The execution engine clients will be set as connected when all
        # accounts are updated and the current order and position status is
        # confirmed. Thus any delay here will be due to blocking network IO.
        seconds = 5  # Hard-coded for now
        timeout = self._clock.utc_now() + timedelta(seconds=seconds)
        while True:
            await asyncio.sleep(0.1)
            if self._clock.utc_now() >= timeout:
                self._log.error(f"Timed out ({seconds}s) waiting for engines to initialize.")
                return False
            if not self._data_engine.check_connected():
                continue
            if not self._exec_engine.check_connected():
                continue
            break

        return True  # Engines initialized

    async def _stop(self) -> None:
        self._is_stopping = True
        self._log.info("state=STOPPING...")

        if self.trader.state == ComponentState.RUNNING:
            self.trader.stop()
            self._log.info(f"Awaiting residual state ({self._check_residuals_delay}s delay)...")
            await asyncio.sleep(self._check_residuals_delay)
            self.trader.check_residuals()

        if self._save_strategy_state:
            self.trader.save()

        if self._data_engine.state == ComponentState.RUNNING:
            self._data_engine.stop()

        if self._exec_engine.state == ComponentState.RUNNING:
            self._exec_engine.stop()

        await self._await_engines_disconnected()

        # Clean up remaining timers
        timer_names = self._clock.timer_names()
        self._clock.cancel_timers()
        for name in timer_names:
            self._log.info(f"Cancelled Timer(name={name}).")

        self._log.info("state=STOPPED.")
        self._is_running = False

    async def _await_engines_disconnected(self) -> None:
        self._log.info("Waiting for engines to disconnect...")

        seconds = 5  # Hard-coded for now
        timeout = self._clock.utc_now() + timedelta(seconds=seconds)
        while True:
            await asyncio.sleep(0.1)
            if self._clock.utc_now() >= timeout:
                self._log.warning(f"Timed out ({seconds}s) waiting for engines to disconnect.")
                break
            if not self._data_engine.check_disconnected():
                continue
            if not self._exec_engine.check_disconnected():
                continue
            break  # Engines disconnected

    def _cancel_all_tasks(self) -> None:
        to_cancel = asyncio.tasks.all_tasks(self._loop)
        if not to_cancel:
            self._log.info("All tasks finished.")
            return

        for task in to_cancel:
            self._log.warning(f"Cancelling pending task {task}")
            task.cancel()

        if self._loop.is_running():
            self._log.warning("Event loop still running during `cancel_all_tasks`.")
            return

        finish_all_tasks: asyncio.Future = asyncio.tasks.gather(
            *to_cancel,
            loop=self._loop,
            return_exceptions=True,
        )
        self._loop.run_until_complete(finish_all_tasks)

        self._log.debug(f"{finish_all_tasks}")

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                self._loop.call_exception_handler({
                    'message': 'unhandled exception during asyncio.run() shutdown',
                    'exception': task.exception(),
                    'task': task,
                })
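# A minimal usage sketch for the node above, under stated assumptions: the
# configuration keys mirror those read in `__init__` ("trader", "logging",
# "exec_database", "strategy", "adapters"), while the level strings, the
# `MyStrategy` class, and the values themselves are placeholders rather than
# a recommended setup. Any `exec_database` type other than "redis" takes the
# `BypassExecutionDatabase` branch.
config = {
    "trader": {"name": "TESTER", "id_tag": "001"},
    "logging": {
        "log_level_console": "INF",  # Assumed level strings accepted by LogLevelParser
        "log_level_file": "DBG",
        "log_level_store": "WRN",
    },
    "exec_database": {"type": "in-memory"},
    "strategy": {"load_state": True, "save_state": True},
    "adapters": {},  # e.g. {"ccxt-binance": {...}} or {"oanda": {...}}
}

node = TradingNode(strategies=[MyStrategy()], config=config)  # MyStrategy is hypothetical
try:
    node.start()  # Blocks in run_until_complete() until stopped
finally:
    node.stop()
    node.dispose()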
class BinanceInstrumentProvider(InstrumentProvider):
    """
    Provides a means of loading `Instrument` objects from the Binance API.

    Parameters
    ----------
    client : BinanceHttpClient
        The client for the provider.
    logger : Logger
        The logger for the provider.

    """

    def __init__(
        self,
        client: BinanceHttpClient,
        logger: Logger,
    ):
        super().__init__()

        self.venue = BINANCE_VENUE
        self._client = client
        self._log = LoggerAdapter(type(self).__name__, logger)

        self._wallet = BinanceWalletHttpAPI(self._client)
        self._spot_market = BinanceSpotMarketHttpAPI(self._client)

        # Async loading flags
        self._loaded = False
        self._loading = False

    async def load_all_or_wait_async(self) -> None:
        """
        Load the latest Binance instruments into the provider asynchronously,
        or await loading.

        If `load_all_async` has been previously called then will immediately return.

        """
        if self._loaded:
            return  # Already loaded

        if not self._loading:
            self._log.debug("Loading instruments...")
            await self.load_all_async()
            self._log.info(f"Loaded {self.count} instruments.")
        else:
            self._log.debug("Awaiting loading...")
            while self._loading:
                # Wait 100ms
                await asyncio.sleep(0.1)

    async def load_all_async(self) -> None:
        """
        Load the latest Binance instruments into the provider asynchronously.
        """
        # Set async loading flag
        self._loading = True

        # Get current commission rates
        try:
            fee_res: List[Dict[str, str]] = await self._wallet.trade_fee()
            fees: Dict[str, Dict[str, str]] = {s["symbol"]: s for s in fee_res}
        except BinanceClientError:
            self._log.error(
                "Cannot load instruments: API key authentication failed "
                "(this is needed to fetch the applicable account fee tier).",
            )
            return

        # Get exchange info for all assets
        assets_res: Dict[str, Any] = await self._spot_market.exchange_info()
        server_time_ns: int = millis_to_nanos(assets_res["serverTime"])

        for info in assets_res["symbols"]:
            local_symbol = Symbol(info["symbol"])

            # Create base asset
            base_asset: str = info["baseAsset"]
            base_currency = Currency(
                code=base_asset,
                precision=info["baseAssetPrecision"],
                iso4217=0,  # Currently undetermined for crypto assets
                name=base_asset,
                currency_type=CurrencyType.CRYPTO,
            )

            # Create quote asset
            quote_asset: str = info["quoteAsset"]
            quote_currency = Currency(
                code=quote_asset,
                precision=info["quoteAssetPrecision"],
                iso4217=0,  # Currently undetermined for crypto assets
                name=quote_asset,
                currency_type=CurrencyType.CRYPTO,
            )

            # symbol = Symbol(base_currency.code + "/" + quote_currency.code)
            instrument_id = InstrumentId(symbol=local_symbol, venue=BINANCE_VENUE)

            # Parse instrument filters
            symbol_filters = {f["filterType"]: f for f in info["filters"]}
            price_filter = symbol_filters.get("PRICE_FILTER")
            lot_size_filter = symbol_filters.get("LOT_SIZE")
            min_notional_filter = symbol_filters.get("MIN_NOTIONAL")
            # market_lot_size_filter = symbol_filters.get("MARKET_LOT_SIZE")

            tick_size = price_filter["tickSize"].rstrip("0")
            step_size = lot_size_filter["stepSize"].rstrip("0")
            price_precision = precision_from_str(tick_size)
            size_precision = precision_from_str(step_size)
            price_increment = Price.from_str(tick_size)
            size_increment = Quantity.from_str(step_size)
            lot_size = Quantity.from_str(step_size)
            max_quantity = Quantity(float(lot_size_filter["maxQty"]), precision=size_precision)
            min_quantity = Quantity(float(lot_size_filter["minQty"]), precision=size_precision)
            min_notional = None
            if min_notional_filter is not None:
                min_notional = Money(min_notional_filter["minNotional"], currency=quote_currency)
            max_price = Price(float(price_filter["maxPrice"]), precision=price_precision)
            min_price = Price(float(price_filter["minPrice"]), precision=price_precision)
            pair_fees = fees.get(local_symbol.value)
            maker_fee: Decimal = Decimal(0)
            taker_fee: Decimal = Decimal(0)
            if pair_fees:
                maker_fee = Decimal(pair_fees["makerCommission"])
                taker_fee = Decimal(pair_fees["takerCommission"])

            # Create instrument
            instrument = CurrencySpot(
                instrument_id=instrument_id,
                local_symbol=local_symbol,
                base_currency=base_currency,
                quote_currency=quote_currency,
                price_precision=price_precision,
                size_precision=size_precision,
                price_increment=price_increment,
                size_increment=size_increment,
                lot_size=lot_size,
                max_quantity=max_quantity,
                min_quantity=min_quantity,
                max_notional=None,
                min_notional=min_notional,
                max_price=max_price,
                min_price=min_price,
                margin_init=Decimal(0),
                margin_maint=Decimal(0),
                maker_fee=maker_fee,
                taker_fee=taker_fee,
                ts_event=server_time_ns,
                ts_init=time.time_ns(),
                info=info,
            )

            self.add_currency(currency=base_currency)
            self.add_currency(currency=quote_currency)
            self.add(instrument=instrument)

        # Set async loading flags
        self._loading = False
        self._loaded = True
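# A brief usage sketch for the provider above, assuming a `BinanceHttpClient`
# and `Logger` have already been constructed and connected elsewhere; only the
# provider calls themselves come from the class definition.
async def load_binance_instruments(client: BinanceHttpClient, logger: Logger):
    provider = BinanceInstrumentProvider(client=client, logger=logger)
    # Safe to call from several tasks: later callers await the in-flight load
    await provider.load_all_or_wait_async()
    return provider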
class TradingNode:
    """
    Provides an asynchronous network node for live trading.
    """

    def __init__(
        self,
        strategies: List[TradingStrategy],
        config: Dict[str, object],
    ):
        """
        Initialize a new instance of the TradingNode class.

        Parameters
        ----------
        strategies : list[TradingStrategy]
            The list of strategies to run on the trading node.
        config : dict[str, object]
            The configuration for the trading node.

        """
        if strategies is None:
            strategies = []

        config_trader = config.get("trader", {})
        config_log = config.get("logging", {})
        config_exec_db = config.get("exec_database", {})
        config_strategy = config.get("strategy", {})
        config_data_clients = config.get("data_clients", {})
        config_exec_clients = config.get("exec_clients", {})

        self._clock = LiveClock()
        self._uuid_factory = UUIDFactory()
        self._loop = asyncio.get_event_loop()
        self._executor = concurrent.futures.ThreadPoolExecutor()
        self._loop.set_default_executor(self._executor)
        self._loop.set_debug(False)  # TODO: Development
        self._is_running = False

        # Setup identifiers
        self.trader_id = TraderId(
            name=config_trader["name"],
            tag=config_trader["id_tag"],
        )

        # Setup logging
        logger = LiveLogger(
            clock=self._clock,
            name=self.trader_id.value,
            level_console=LogLevelParser.from_str_py(config_log.get("log_level_console")),
            level_file=LogLevelParser.from_str_py(config_log.get("log_level_file")),
            level_store=LogLevelParser.from_str_py(config_log.get("log_level_store")),
            log_thread=config_log.get("log_thread_id", True),
            log_to_file=config_log.get("log_to_file", False),
            log_file_path=config_log.get("log_file_path", ""),
        )

        self._log = LoggerAdapter(component_name=self.__class__.__name__, logger=logger)
        self._log_header()
        self._log.info("Building...")

        self.portfolio = Portfolio(
            clock=self._clock,
            logger=logger,
        )

        self._data_engine = LiveDataEngine(
            loop=self._loop,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=logger,
        )

        self.portfolio.register_cache(self._data_engine.cache)
        self.analyzer = PerformanceAnalyzer()

        if config_exec_db["type"] == "redis":
            exec_db = RedisExecutionDatabase(
                trader_id=self.trader_id,
                logger=logger,
                command_serializer=MsgPackCommandSerializer(),
                event_serializer=MsgPackEventSerializer(),
                config={
                    "host": config_exec_db["host"],
                    "port": config_exec_db["port"],
                },
            )
        else:
            exec_db = BypassExecutionDatabase(
                trader_id=self.trader_id,
                logger=logger,
            )

        self._exec_engine = LiveExecutionEngine(
            loop=self._loop,
            database=exec_db,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=logger,
        )

        self._exec_engine.load_cache()
        self._setup_data_clients(config_data_clients, logger)
        self._setup_exec_clients(config_exec_clients, logger)

        self.trader = Trader(
            trader_id=self.trader_id,
            strategies=strategies,
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            clock=self._clock,
            logger=logger,
        )

        self._check_residuals_delay = 2.0  # Hard-coded delay (refactor)
        self._load_strategy_state = config_strategy.get("load_state", True)
        self._save_strategy_state = config_strategy.get("save_state", True)

        if self._load_strategy_state:
            self.trader.load()

        self._setup_loop()
        self._log.info("state=INITIALIZED.")

    def get_event_loop(self):
        """
        Return the event loop of the trading node.

        Returns
        -------
        asyncio.AbstractEventLoop

        """
        return self._loop

    def start(self):
        """
        Start the trading node.
        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._run())
            else:
                self._loop.run_until_complete(self._run())
        except RuntimeError as ex:
            self._log.exception(ex)

    def stop(self):
        """
        Stop the trading node gracefully.
        After a specified delay the internal `Trader` residuals will be checked.
        If strategy state saving is configured, strategy states will then be saved.

        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._stop())
            else:
                self._loop.run_until_complete(self._stop())
        except RuntimeError as ex:
            self._log.exception(ex)

    # noinspection PyTypeChecker
    # Expected timedelta, got datetime.pyi instead
    def dispose(self):
        """
        Dispose of the trading node.

        Gracefully shuts down the executor and event loop.

        """
        try:
            timeout = self._clock.utc_now() + timedelta(seconds=5)
            while self._is_running:
                time.sleep(0.1)
                if self._clock.utc_now() >= timeout:
                    self._log.warning("Timed out (5s) waiting for node to stop.")
                    break

            self._log.info("state=DISPOSING...")
            self.trader.dispose()
            self._data_engine.dispose()
            self._exec_engine.dispose()

            self._log.info("Shutting down executor...")
            if is_ge_python_version(major=3, minor=9):
                # cancel_futures added in Python 3.9
                self._executor.shutdown(wait=True, cancel_futures=True)
            else:
                self._executor.shutdown(wait=True)

            self._log.info("Stopping event loop...")
            self._loop.stop()
            if is_ge_python_version(major=3, minor=7):
                # tasks.all_tasks added in Python 3.7
                self._cancel_all_tasks()
        except RuntimeError as ex:
            self._log.exception(ex)
        finally:
            if self._loop.is_running():
                self._log.warning("Cannot close a running event loop.")
            else:
                self._log.info("Closing event loop...")
                self._loop.close()

            # Check and log if event loop is running
            if self._loop.is_running():
                self._log.warning(f"loop.is_running={self._loop.is_running()}")
            else:
                self._log.info(f"loop.is_running={self._loop.is_running()}")

            # Check and log if event loop is closed
            if not self._loop.is_closed():
                self._log.warning(f"loop.is_closed={self._loop.is_closed()}")
            else:
                self._log.info(f"loop.is_closed={self._loop.is_closed()}")

            self._log.info("state=DISPOSED.")
            time.sleep(0.1)  # Assist final logging to daemonic logging thread

    def _log_header(self):
        nautilus_header(self._log)
        self._log.info(f"redis {redis.__version__}")
        self._log.info(f"msgpack {msgpack.version[0]}.{msgpack.version[1]}.{msgpack.version[2]}")
        if uvloop_version:
            self._log.info(f"uvloop {uvloop_version}")
        self._log.info("=================================================================")

    def _setup_data_clients(self, config, logger):
        try:
            # Setup each data client
            for name, client_config in config.items():
                if name == "binance":
                    data_client = BinanceDataClientFactory.create(
                        config=client_config,
                        data_engine=self._data_engine,
                        clock=self._clock,
                        logger=logger,
                    )
                    self._data_engine.register_client(data_client)
                elif name == "oanda":
                    data_client = OandaDataClientFactory.create(
                        config=client_config,
                        data_engine=self._data_engine,
                        clock=self._clock,
                        logger=logger,
                    )
                    self._data_engine.register_client(data_client)
                else:
                    self._log.error(f"No DataClient available for `{name}`.")
        except RuntimeError as ex:
            self._log.exception(ex)

    def _setup_exec_clients(self, config, logger):
        try:
            # Setup each execution client
            for name, client_config in config.items():
                if name == "binance":
                    pass
                elif name == "oanda":
                    pass
                else:
                    self._log.error(f"No ExecutionClient available for `{name}`.")
        except RuntimeError as ex:
            self._log.exception(ex)

    def _setup_loop(self):
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signals = (signal.SIGTERM, signal.SIGINT)
        if self._loop.is_closed():
            self._log.error("Cannot setup signal handling (event loop was closed).")
            return
        for sig in signals:
            self._loop.add_signal_handler(sig, self._loop_sig_handler, sig)
        self._log.debug(f"Event loop {signals} handling setup.")

    def _loop_sig_handler(self, sig):
        self._loop.remove_signal_handler(signal.SIGTERM)
        self._loop.add_signal_handler(signal.SIGINT, lambda: None)
        self._log.warning(f"Received {sig!s}, shutting down...")
        self.stop()

    async def _run(self):
        try:
            self._log.info("state=STARTING...")
            self._data_engine.start()
            self._exec_engine.start()

            # Wait for engines to initialize (will hang if never initialized)
            await self._loop.run_in_executor(None, self._wait_for_engines)

            self.trader.start()

            if self._loop.is_running():
                self._log.info("state=RUNNING.")
            else:
                self._log.warning("Event loop is not running.")

            self._is_running = True

            # Continue to run loop while engines are running...
            await self._data_engine.get_run_queue_task()
            await self._exec_engine.get_run_queue_task()
        except asyncio.exceptions.CancelledError as ex:
            self._log.error(str(ex))

    def _wait_for_engines(self):
        self._log.info("Waiting for engines to initialize...")

        # The engines require that all of their clients are initialized.
        # The data engine clients will be set as initialized when all
        # instruments are received and updated with the data engine.
        # The execution engine clients will be set as initialized when all
        # accounts are updated and the current order and position status is
        # confirmed. Thus any delay here will be due to blocking network IO.
        while True:
            time.sleep(0.1)
            if not self._data_engine.check_initialized():
                continue
            if not self._exec_engine.check_initialized():
                continue
            return True  # Engines initialized

    async def _stop(self):
        self._is_stopping = True
        self._log.info("state=STOPPING...")

        self.trader.stop()

        self._log.info("Awaiting residual state...")
        await asyncio.sleep(self._check_residuals_delay)

        self.trader.check_residuals()

        if self._save_strategy_state:
            self.trader.save()

        self._data_engine.stop()
        self._exec_engine.stop()

        await self._data_engine.get_run_queue_task()
        await self._exec_engine.get_run_queue_task()

        self._log.debug(f"{self._data_engine.get_run_queue_task()}")
        self._log.debug(f"{self._exec_engine.get_run_queue_task()}")

        # Clean up remaining timers
        timer_names = self._clock.timer_names()
        self._clock.cancel_timers()
        for name in timer_names:
            self._log.info(f"Cancelled Timer(name={name}).")

        self._log.info("state=STOPPED.")
        self._is_running = False

    def _cancel_all_tasks(self):
        to_cancel = tasks.all_tasks(self._loop)
        if not to_cancel:
            self._log.info("All tasks finished.")
            return

        for task in to_cancel:
            self._log.warning(f"Cancelling pending task {task}")
            task.cancel()

        if self._loop.is_running():
            self._log.warning("Event loop still running during `cancel_all_tasks`.")
            return

        finish_all_tasks = tasks.gather(*to_cancel, loop=self._loop, return_exceptions=True)
        self._loop.run_until_complete(finish_all_tasks)

        self._log.debug(f"{finish_all_tasks}")

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                self._loop.call_exception_handler({
                    'message': 'unhandled exception during asyncio.run() shutdown',
                    'exception': task.exception(),
                    'task': task,
                })
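# A self-contained sketch of the signal-handling pattern used by `_setup_loop`
# and `_loop_sig_handler` above: register loop-level handlers so SIGTERM/SIGINT
# trigger a graceful shutdown rather than killing the process mid-trade. This
# uses only the standard library and runs on POSIX systems (asyncio's
# `add_signal_handler` is not supported on Windows).
import asyncio
import signal


async def main() -> None:
    loop = asyncio.get_running_loop()
    stop_event = asyncio.Event()

    def handle_sig(sig: signal.Signals) -> None:
        print(f"Received {sig!s}, shutting down...")
        stop_event.set()

    for sig in (signal.SIGTERM, signal.SIGINT):
        loop.add_signal_handler(sig, handle_sig, sig)

    await stop_event.wait()  # Run until a shutdown signal arrives


# asyncio.run(main())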
class TradingNode:
    """
    Provides an asynchronous network node for live trading.
    """

    def __init__(
        self,
        strategies: List[TradingStrategy],
        config: Dict[str, object],
    ):
        """
        Initialize a new instance of the TradingNode class.

        Parameters
        ----------
        strategies : list[TradingStrategy]
            The list of strategies to run on the trading node.
        config : dict[str, object]
            The configuration for the trading node.

        Raises
        ------
        ValueError
            If strategies is None or empty.
        ValueError
            If config is None or empty.

        """
        PyCondition.not_none(strategies, "strategies")
        PyCondition.not_none(config, "config")
        PyCondition.not_empty(strategies, "strategies")
        PyCondition.not_empty(config, "config")

        self._config = config

        # Extract configs
        config_trader = config.get("trader", {})
        config_system = config.get("system", {})
        config_log = config.get("logging", {})
        config_exec_db = config.get("exec_database", {})
        config_risk = config.get("risk", {})
        config_strategy = config.get("strategy", {})

        # System config
        self._connection_timeout = config_system.get("connection_timeout", 5.0)
        self._disconnection_timeout = config_system.get("disconnection_timeout", 5.0)
        self._check_residuals_delay = config_system.get("check_residuals_delay", 5.0)
        self._load_strategy_state = config_strategy.get("load_state", True)
        self._save_strategy_state = config_strategy.get("save_state", True)

        # Setup loop
        self._loop = asyncio.get_event_loop()
        self._executor = concurrent.futures.ThreadPoolExecutor()
        self._loop.set_default_executor(self._executor)
        self._loop.set_debug(config_system.get("loop_debug", False))

        # Components
        self._clock = LiveClock(loop=self._loop)
        self._uuid_factory = UUIDFactory()
        self.system_id = self._uuid_factory.generate()
        self.created_time = self._clock.utc_now()
        self._is_running = False

        # Setup identifiers
        self.trader_id = TraderId(
            name=config_trader["name"],
            tag=config_trader["id_tag"],
        )

        # Setup logging
        level_stdout = LogLevelParser.from_str_py(config_log.get("level_stdout"))

        self._logger = LiveLogger(
            loop=self._loop,
            clock=self._clock,
            trader_id=self.trader_id,
            system_id=self.system_id,
            level_stdout=level_stdout,
        )

        self._log = LoggerAdapter(
            component=self.__class__.__name__,
            logger=self._logger,
        )

        self._log_header()
        self._log.info("Building...")

        if platform.system() != "Windows":
            # Requires the logger to be initialized
            # Windows does not support signal handling
            # https://stackoverflow.com/questions/45987985/asyncio-loops-add-signal-handler-in-windows
            self._setup_loop()

        # Build platform
        # ----------------------------------------------------------------------
        self.portfolio = Portfolio(
            clock=self._clock,
            logger=self._logger,
        )

        self._data_engine = LiveDataEngine(
            loop=self._loop,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=self._logger,
            config={"qsize": 10000},
        )

        self.portfolio.register_cache(self._data_engine.cache)
        self.analyzer = PerformanceAnalyzer()

        if config_exec_db["type"] == "redis":
            exec_db = RedisExecutionDatabase(
                trader_id=self.trader_id,
                logger=self._logger,
                command_serializer=MsgPackCommandSerializer(),
                event_serializer=MsgPackEventSerializer(),
                config={
                    "host": config_exec_db["host"],
                    "port": config_exec_db["port"],
                },
            )
        else:
            exec_db = BypassExecutionDatabase(
                trader_id=self.trader_id,
                logger=self._logger,
            )

        self._exec_engine = LiveExecutionEngine(
            loop=self._loop,
            database=exec_db,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=self._logger,
            config={"qsize": 10000},
        )

        self._risk_engine = LiveRiskEngine(
            loop=self._loop,
            exec_engine=self._exec_engine,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=self._logger,
            config=config_risk,
        )

        self._exec_engine.load_cache()
        self._exec_engine.register_risk_engine(self._risk_engine)

        self.trader = Trader(
            trader_id=self.trader_id,
            strategies=strategies,
            portfolio=self.portfolio,
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            risk_engine=self._risk_engine,
            clock=self._clock,
            logger=self._logger,
        )

        if self._load_strategy_state:
            self.trader.load()

        self._builder = TradingNodeBuilder(
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            risk_engine=self._risk_engine,
            clock=self._clock,
            logger=self._logger,
            log=self._log,
        )

        self._log.info("state=INITIALIZED.")
        self.time_to_initialize = self._clock.delta(self.created_time)
        self._log.info(f"Initialized in {self.time_to_initialize.total_seconds():.3f}s.")

        self._is_built = False

    @property
    def is_running(self) -> bool:
        """
        If the trading node is running.

        Returns
        -------
        bool
            True if running, else False.

        """
        return self._is_running

    @property
    def is_built(self) -> bool:
        """
        If the trading node clients are built.

        Returns
        -------
        bool
            True if built, else False.

        """
        return self._is_built

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        """
        Return the event loop of the trading node.

        Returns
        -------
        asyncio.AbstractEventLoop

        """
        return self._loop

    def get_logger(self) -> LiveLogger:
        """
        Return the logger for the trading node.

        Returns
        -------
        LiveLogger

        """
        return self._logger

    def add_data_client_factory(self, name, factory):
        """
        Add the given data client factory to the node.

        Parameters
        ----------
        name : str
            The name of the client factory.
        factory : LiveDataClientFactory or LiveExecutionClientFactory
            The factory to add.

        Raises
        ------
        ValueError
            If name is not a valid string.
        KeyError
            If name has already been added.

        """
        self._builder.add_data_client_factory(name, factory)

    def add_exec_client_factory(self, name, factory):
        """
        Add the given execution client factory to the node.

        Parameters
        ----------
        name : str
            The name of the client factory.
        factory : LiveDataClientFactory or LiveExecutionClientFactory
            The factory to add.

        Raises
        ------
        ValueError
            If name is not a valid string.
        KeyError
            If name has already been added.

        """
        self._builder.add_exec_client_factory(name, factory)

    def build(self) -> None:
        """
        Build the node's clients.
        """
        if self._is_built:
            raise RuntimeError("The trading node's clients are already built.")

        self._builder.build_data_clients(self._config.get("data_clients"))
        self._builder.build_exec_clients(self._config.get("exec_clients"))
        self._is_built = True

    def start(self) -> None:
        """
        Start the trading node.
        """
        if not self._is_built:
            raise RuntimeError(
                "The trading node's clients have not been built. "
                "Please run `node.build()` prior to start."
            )

        try:
            if self._loop.is_running():
                self._loop.create_task(self._run())
            else:
                self._loop.run_until_complete(self._run())
        except RuntimeError as ex:
            self._log.exception(ex)

    def stop(self) -> None:
        """
        Stop the trading node gracefully.

        After a specified delay the internal `Trader` residuals will be checked.
        If strategy state saving is configured, strategy states will then be saved.

        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._stop())
            else:
                self._loop.run_until_complete(self._stop())
        except RuntimeError as ex:
            self._log.exception(ex)

    def dispose(self) -> None:
        """
        Dispose of the trading node.

        Gracefully shuts down the executor and event loop.

        """
        try:
            timeout = self._clock.utc_now() + timedelta(seconds=5)
            while self._is_running:
                time.sleep(0.1)
                if self._clock.utc_now() >= timeout:
                    self._log.warning("Timed out (5s) waiting for node to stop.")
                    break

            self._log.info("state=DISPOSING...")

            self._log.debug(f"{self._data_engine.get_run_queue_task()}")
            self._log.debug(f"{self._exec_engine.get_run_queue_task()}")
            self._log.debug(f"{self._risk_engine.get_run_queue_task()}")

            self.trader.dispose()
            self._data_engine.dispose()
            self._exec_engine.dispose()
            self._risk_engine.dispose()

            self._log.info("Shutting down executor...")
            if sys.version_info >= (3, 9):
                # cancel_futures added in Python 3.9
                self._executor.shutdown(wait=True, cancel_futures=True)
            else:
                self._executor.shutdown(wait=True)

            self._log.info("Stopping event loop...")
            self._cancel_all_tasks()
            self._logger.stop()
            self._loop.stop()
        except RuntimeError as ex:
            self._log.exception(ex)
        finally:
            if self._loop.is_running():
                self._log.warning("Cannot close a running event loop.")
            else:
                self._log.info("Closing event loop...")
                self._loop.close()

            # Check and log if event loop is running
            if self._loop.is_running():
                self._log.warning(f"loop.is_running={self._loop.is_running()}")
            else:
                self._log.info(f"loop.is_running={self._loop.is_running()}")

            # Check and log if event loop is closed
            if not self._loop.is_closed():
                self._log.warning(f"loop.is_closed={self._loop.is_closed()}")
            else:
                self._log.info(f"loop.is_closed={self._loop.is_closed()}")

            self._log.info("state=DISPOSED.")

    def _log_header(self) -> None:
        nautilus_header(self._log)
        self._log.info(f"redis {redis.__version__}")
        self._log.info(f"msgpack {msgpack.version[0]}.{msgpack.version[1]}.{msgpack.version[2]}")
        if uvloop_version:
            self._log.info(f"uvloop {uvloop_version}")
        self._log.info("=================================================================")

    def _setup_loop(self) -> None:
        if self._loop.is_closed():
            self._log.error("Cannot setup signal handling (event loop was closed).")
            return

        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signals = (signal.SIGTERM, signal.SIGINT, signal.SIGABRT)
        for sig in signals:
            self._loop.add_signal_handler(sig, self._loop_sig_handler, sig)
        self._log.debug(f"Event loop {signals} handling setup.")

    def _loop_sig_handler(self, sig: signal.Signals) -> None:
        self._loop.remove_signal_handler(signal.SIGTERM)
        self._loop.add_signal_handler(signal.SIGINT, lambda: None)
        self._log.warning(f"Received {sig!s}, shutting down...")
        self.stop()

    async def _run(self) -> None:
        try:
            self._log.info("state=STARTING...")
            self._is_running = True

            self._logger.start()
            self._data_engine.start()
            self._exec_engine.start()
            self._risk_engine.start()

            result: bool = await self._await_engines_connected()
            if not result:
                return

            result: bool = await self._exec_engine.reconcile_state()
            if not result:
                return

            self.trader.start()

            if self._loop.is_running():
                self._log.info("state=RUNNING.")
            else:
                self._log.warning("Event loop is not running.")

            # Continue to run while engines are running...
            await self._data_engine.get_run_queue_task()
            await self._exec_engine.get_run_queue_task()
            await self._risk_engine.get_run_queue_task()
        except asyncio.CancelledError as ex:
            self._log.error(str(ex))

    async def _await_engines_connected(self) -> bool:
        self._log.info(f"Waiting for engines to initialize "
                       f"({self._connection_timeout}s timeout)...")

        # The data engine clients will be set as connected when all
        # instruments are received and updated with the data engine.
        # The execution engine clients will be set as connected when all
        # accounts are updated and the current order and position status is
        # reconciled. Thus any delay here will be due to blocking network IO.
        seconds = self._connection_timeout
        timeout = self._clock.utc_now() + timedelta(seconds=seconds)
        while True:
            await asyncio.sleep(0)
            if self._clock.utc_now() >= timeout:
                self._log.error(f"Timed out ({seconds}s) waiting for engines to connect.")
                return False
            if not self._data_engine.check_connected():
                continue
            if not self._exec_engine.check_connected():
                continue
            break

        return True  # Engines connected

    async def _stop(self) -> None:
        self._is_stopping = True
        self._log.info("state=STOPPING...")

        if self.trader.state == ComponentState.RUNNING:
            self.trader.stop()
            self._log.info(f"Awaiting residual state ({self._check_residuals_delay}s delay)...")
            await asyncio.sleep(self._check_residuals_delay)
            self.trader.check_residuals()

        if self._save_strategy_state:
            self.trader.save()

        if self._data_engine.state == ComponentState.RUNNING:
            self._data_engine.stop()

        if self._exec_engine.state == ComponentState.RUNNING:
            self._exec_engine.stop()

        if self._risk_engine.state == ComponentState.RUNNING:
            self._risk_engine.stop()

        await self._await_engines_disconnected()

        # Clean up remaining timers
        timer_names = self._clock.timer_names()
        self._clock.cancel_timers()
        for name in timer_names:
            self._log.info(f"Cancelled Timer(name={name}).")

        self._log.info("state=STOPPED.")
        self._is_running = False

    async def _await_engines_disconnected(self) -> None:
        self._log.info(f"Waiting for engines to disconnect "
                       f"({self._disconnection_timeout}s timeout)...")

        seconds = self._disconnection_timeout
        timeout = self._clock.utc_now() + timedelta(seconds=seconds)
        while True:
            await asyncio.sleep(0)
            if self._clock.utc_now() >= timeout:
                self._log.error(f"Timed out ({seconds}s) waiting for engines to disconnect.")
                break
            if not self._data_engine.check_disconnected():
                continue
            if not self._exec_engine.check_disconnected():
                continue
            break  # Engines disconnected

    def _cancel_all_tasks(self) -> None:
        to_cancel = asyncio.tasks.all_tasks(self._loop)
        if not to_cancel:
            self._log.info("All tasks finished.")
            return

        for task in to_cancel:
            self._log.warning(f"Cancelling pending task {task}")
            task.cancel()

        if self._loop.is_running():
            self._log.warning("Event loop still running during `cancel_all_tasks`.")
            return

        finish_all_tasks: asyncio.Future = asyncio.tasks.gather(
            *to_cancel,
            loop=self._loop,
            return_exceptions=True,
        )
        self._loop.run_until_complete(finish_all_tasks)

        self._log.debug(f"{finish_all_tasks}")

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                self._loop.call_exception_handler({
                    "message": "unhandled exception during asyncio.run() shutdown",
                    "exception": task.exception(),
                    "task": task,
                })
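# A usage sketch for this version of the node, which separates construction
# from client building via `TradingNodeBuilder`. The factory names are
# hypothetical placeholders; the call sequence (add factories, `build()`,
# then `start()`) is taken from the methods above, and `start()` raises
# RuntimeError if `build()` has not been called first.
node = TradingNode(strategies=[MyStrategy()], config=config)  # As in the earlier sketch

node.add_data_client_factory("binance", BinanceLiveDataClientFactory)  # Hypothetical factory
node.add_exec_client_factory("binance", BinanceLiveExecClientFactory)  # Hypothetical factory

node.build()
try:
    node.start()
finally:
    node.stop()
    node.dispose()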
class TestBetfairExecutionClient:
    def setup(self):
        # Fixture Setup
        self.loop = asyncio.get_event_loop()
        self.loop.set_debug(True)

        self.clock = LiveClock()
        self.uuid_factory = UUIDFactory()

        self.trader_id = TestStubs.trader_id()
        self.venue = BETFAIR_VENUE
        self.account_id = AccountId(self.venue.value, "001")

        # Setup logging
        self.logger = LiveLogger(loop=self.loop, clock=self.clock, level_stdout=LogLevel.DEBUG)
        self._log = LoggerAdapter("TestBetfairExecutionClient", self.logger)

        self.msgbus = MessageBus(
            trader_id=self.trader_id,
            clock=self.clock,
            logger=self.logger,
        )

        self.cache = TestStubs.cache()
        self.cache.add_instrument(BetfairTestStubs.betting_instrument())
        self.cache.add_account(TestStubs.betting_account(account_id=self.account_id))

        self.portfolio = Portfolio(
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
        )

        self.exec_engine = LiveExecutionEngine(
            loop=self.loop,
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
        )

        self.betfair_client: BetfairClient = BetfairTestStubs.betfair_client(
            loop=self.loop,
            logger=self.logger,
        )
        assert self.betfair_client.session_token

        self.instrument_provider = BetfairTestStubs.instrument_provider(
            betfair_client=self.betfair_client,
        )

        self.client = BetfairExecutionClient(
            loop=asyncio.get_event_loop(),
            client=self.betfair_client,
            account_id=self.account_id,
            base_currency=GBP,
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
            instrument_provider=self.instrument_provider,
            market_filter={},
        )
        self.exec_engine.register_client(self.client)

        # Re-route exec engine messages through `handler`
        self.messages = []

        def handler(func):
            def inner(x):
                self.messages.append(x)
                return func(x)

            return inner

        def listener(x):
            print(x)

        self.msgbus.subscribe("*", listener)

        self.msgbus.deregister(endpoint="ExecEngine.execute", handler=self.exec_engine.execute)
        self.msgbus.register(endpoint="ExecEngine.execute", handler=handler(self.exec_engine.execute))

        self.msgbus.deregister(endpoint="ExecEngine.process", handler=self.exec_engine.process)
        self.msgbus.register(endpoint="ExecEngine.process", handler=handler(self.exec_engine.process))

        self.msgbus.deregister(endpoint="Portfolio.update_account", handler=self.portfolio.update_account)
        self.msgbus.register(endpoint="Portfolio.update_account", handler=handler(self.portfolio.update_account))

    def _prefill_venue_order_id_to_client_order_id(self, update):
        order_ids = [
            uo["id"]
            for market in update.get("oc", [])
            for order in market.get("orc", [])
            for uo in order.get("uo", [])
        ]
        return {
            VenueOrderId(oid): ClientOrderId(str(i + 1))
            for i, oid in enumerate(order_ids)
        }

    async def _setup_account(self):
        await self.client.connection_account_state()

    def _setup_exec_client_and_cache(self, update):
        """
        Called before processing a test streaming update - ensure all orders
        in `update` are in the cache.

        """
        venue_order_ids = self._prefill_venue_order_id_to_client_order_id(update)
        venue_order_id_to_client_order_id = {}
        for c_id, v_id in enumerate(venue_order_ids):
            client_order_id = ClientOrderId(str(c_id))
            venue_order_id = VenueOrderId(str(v_id))
            self._log.debug(f"Adding client_order_id=[{c_id}], venue_order_id=[{v_id}]")
            order = BetfairTestStubs.make_accepted_order(
                venue_order_id=venue_order_id,
                client_order_id=client_order_id,
            )
            self._log.debug(f"Created order: {order}")
            venue_order_id_to_client_order_id[v_id] = order.client_order_id
            cache_order = self.cache.order(client_order_id=order.client_order_id)
            self._log.debug(f"Cached order: {order}")
            if cache_order is None:
                self._log.debug("Adding order to cache")
                self.cache.add_order(order, position_id=PositionId(v_id.value))
                assert self.cache.order(client_order_id).venue_order_id == venue_order_id
            self.cache.update_order(order)

        self.client.venue_order_id_to_client_order_id = venue_order_id_to_client_order_id

    async def _account_state(self):
        account_details = await self.betfair_client.get_account_details()
        account_funds = await self.betfair_client.get_account_funds()
        timestamp = self.clock.timestamp_ns()
        account_state = betfair_account_to_account_state(
            account_detail=account_details,
            account_funds=account_funds,
            event_id=self.uuid_factory.generate(),
            ts_event=timestamp,
            ts_init=timestamp,
        )
        return account_state

    @pytest.mark.asyncio
    async def test_submit_order_success(self):
        # Arrange
        command = BetfairTestStubs.submit_order_command()
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_place_order_success())

        # Act
        self.client.submit_order(command)
        await asyncio.sleep(0)

        # Assert
        submitted, accepted = self.messages
        assert isinstance(submitted, OrderSubmitted)
        assert isinstance(accepted, OrderAccepted)
        assert accepted.venue_order_id == VenueOrderId("228302937743")

    @pytest.mark.asyncio
    async def test_submit_order_error(self):
        # Arrange
        command = BetfairTestStubs.submit_order_command()
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_place_order_error())

        # Act
        self.client.submit_order(command)
        await asyncio.sleep(0)

        # Assert
        submitted, rejected = self.messages
        assert isinstance(submitted, OrderSubmitted)
        assert isinstance(rejected, OrderRejected)
        assert rejected.reason == "PERMISSION_DENIED: ERROR_IN_ORDER"

    @pytest.mark.asyncio
    async def test_modify_order_success(self):
        # Arrange
        venue_order_id = VenueOrderId("240808576108")
        order = BetfairTestStubs.make_accepted_order(venue_order_id=venue_order_id)
        command = BetfairTestStubs.modify_order_command(
            instrument_id=order.instrument_id,
            client_order_id=order.client_order_id,
            venue_order_id=venue_order_id,
        )
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_replace_orders_success())

        # Act
        self.cache.add_order(order, PositionId("1"))
        self.client.modify_order(command)
        await asyncio.sleep(0)

        # Assert
        pending_update, updated = self.messages
        assert isinstance(pending_update, OrderPendingUpdate)
        assert isinstance(updated, OrderUpdated)
        assert updated.price == Price.from_str("0.02000")

    @pytest.mark.asyncio
    async def test_modify_order_error_order_doesnt_exist(self):
        # Arrange
        venue_order_id = VenueOrderId("229435133092")
        order = BetfairTestStubs.make_accepted_order(venue_order_id=venue_order_id)
        command = BetfairTestStubs.modify_order_command(
            instrument_id=order.instrument_id,
            client_order_id=order.client_order_id,
            venue_order_id=venue_order_id,
        )
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_replace_orders_success())

        # Act
        self.client.modify_order(command)
        await asyncio.sleep(0)

        # Assert
        pending_update, rejected = self.messages
        assert isinstance(pending_update, OrderPendingUpdate)
        assert isinstance(rejected, OrderModifyRejected)
        assert rejected.reason == "ORDER NOT IN CACHE"

    @pytest.mark.asyncio
    async def test_modify_order_error_no_venue_id(self):
        # Arrange
        order = BetfairTestStubs.make_submitted_order()
        self.cache.add_order(order, position_id=BetfairTestStubs.position_id())

        command = BetfairTestStubs.modify_order_command(
            instrument_id=order.instrument_id,
            client_order_id=order.client_order_id,
            venue_order_id="",
        )
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_replace_orders_success())

        # Act
        self.client.modify_order(command)
        await asyncio.sleep(0)

        # Assert
        pending_update, rejected = self.messages
        assert isinstance(pending_update, OrderPendingUpdate)
        assert isinstance(rejected, OrderModifyRejected)
        assert rejected.reason == "ORDER MISSING VENUE_ORDER_ID"

    @pytest.mark.asyncio
    async def test_cancel_order_success(self):
        # Arrange
        order = BetfairTestStubs.make_submitted_order()
        self.cache.add_order(order, position_id=BetfairTestStubs.position_id())

        command = BetfairTestStubs.cancel_order_command(
            instrument_id=order.instrument_id,
            client_order_id=order.client_order_id,
            venue_order_id=VenueOrderId("240564968665"),
        )
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_cancel_orders_success())

        # Act
        self.client.cancel_order(command)
        await asyncio.sleep(0)

        # Assert
        pending_cancel, cancelled = self.messages
        assert isinstance(pending_cancel, OrderPendingCancel)
        assert isinstance(cancelled, OrderCanceled)

    @pytest.mark.asyncio
    async def test_cancel_order_fail(self):
        # Arrange
        order = BetfairTestStubs.make_submitted_order()
        self.cache.add_order(order, position_id=BetfairTestStubs.position_id())

        command = BetfairTestStubs.cancel_order_command(
            instrument_id=order.instrument_id,
            client_order_id=order.client_order_id,
            venue_order_id=VenueOrderId("228302937743"),
        )
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_cancel_orders_error())

        # Act
        self.client.cancel_order(command)
        await asyncio.sleep(0)

        # Assert
        pending_cancel, rejected = self.messages
        assert isinstance(pending_cancel, OrderPendingCancel)
        assert isinstance(rejected, OrderCancelRejected)

    @pytest.mark.asyncio
    async def test_order_multiple_fills(self):
        # Arrange
        self.exec_engine.start()
        client_order_id = ClientOrderId("1")
        venue_order_id = VenueOrderId("246938411724")
        submitted = BetfairTestStubs.make_submitted_order(
            client_order_id=client_order_id,
            quantity=Quantity.from_int(20),
        )
        self.cache.add_order(submitted, position_id=BetfairTestStubs.position_id())
        self.client.venue_order_id_to_client_order_id[venue_order_id] = client_order_id

        # Act
        for update in BetfairStreaming.ocm_multiple_fills():
            await self.client._handle_order_stream_update(update)
            await asyncio.sleep(0.1)

        # Assert
        result = [fill.last_qty for fill in self.messages]
        expected = [
            Quantity.from_str("16.1900"),
            Quantity.from_str("0.77"),
            Quantity.from_str("0.77"),
        ]
        assert result == expected

    @pytest.mark.asyncio
    async def test_connection_account_state(self):
        # Arrange, Act
        await self.client.connection_account_state()

        # Assert
        assert self.cache.account(self.account_id)

    @pytest.mark.asyncio
    async def test_check_account_currency(self):
        # Arrange, Act, Assert
        await self.client.check_account_currency()

    @pytest.mark.asyncio
    async def test_order_stream_full_image(self):
        # Arrange
        update = BetfairStreaming.ocm_FULL_IMAGE()
        await self._setup_account()
        self._setup_exec_client_and_cache(update=update)

        # Act
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)

        # Assert
        assert len(self.messages) == 7

    @pytest.mark.asyncio
    async def test_order_stream_empty_image(self):
        # Arrange
        update = BetfairStreaming.ocm_EMPTY_IMAGE()
        await self._setup_account()
        self._setup_exec_client_and_cache(update=update)

        # Act
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)

        # Assert
        assert len(self.messages) == 1

    @pytest.mark.asyncio
    async def test_order_stream_new_full_image(self):
        update = BetfairStreaming.ocm_NEW_FULL_IMAGE()
        await self._setup_account()
        self._setup_exec_client_and_cache(update=update)
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)
        assert len(self.messages) == 4

    @pytest.mark.asyncio
    async def test_order_stream_sub_image(self):
        # Arrange
        update = BetfairStreaming.ocm_SUB_IMAGE()
        await self._setup_account()
        self._setup_exec_client_and_cache(update=update)

        # Act
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)

        # Assert
        assert len(self.messages) == 1

    @pytest.mark.asyncio
    async def test_order_stream_update(self):
        # Arrange
        update = BetfairStreaming.ocm_UPDATE()
        await self._setup_account()
        self._setup_exec_client_and_cache(update=update)

        # Act
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)

        # Assert
        assert len(self.messages) == 2

    @pytest.mark.asyncio
    async def test_order_stream_filled(self):
        # Arrange
        update = BetfairStreaming.ocm_FILLED()
        self._setup_exec_client_and_cache(update)
        await self._setup_account()

        # Act
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)

        # Assert
        assert len(self.messages) == 2
        assert isinstance(self.messages[1], OrderFilled)
        assert self.messages[1].last_px == Price.from_str("0.9090909")

    @pytest.mark.asyncio
    async def test_order_stream_filled_multiple_prices(self):
        # Arrange
        await self._setup_account()
        update1 = BetfairStreaming.generate_order_update(
            price="1.50",
            size=20,
            side="B",
            status="E",
            sm=10,
            avp="1.60",
        )
        self._setup_exec_client_and_cache(update1)
        await self.client._handle_order_stream_update(update=update1)
        await asyncio.sleep(0)
        order = self.cache.order(client_order_id=ClientOrderId("0"))
        event = self.messages[-1]
        order.apply(event)

        # Act
        update2 = BetfairStreaming.generate_order_update(
            price="1.50",
            size=20,
            side="B",
            status="EC",
            sm=20,
            avp="1.55",
        )
        self._setup_exec_client_and_cache(update2)
        await self.client._handle_order_stream_update(update=update2)
        await asyncio.sleep(0)

        # Assert
        assert len(self.messages) == 3
        assert isinstance(self.messages[1], OrderFilled)
        assert isinstance(self.messages[2], OrderFilled)
        assert self.messages[1].last_px == price_to_probability("1.60")
        assert self.messages[2].last_px == price_to_probability("1.50")

    @pytest.mark.asyncio
    async def test_order_stream_mixed(self):
        # Arrange
        update = BetfairStreaming.ocm_MIXED()
        self._setup_exec_client_and_cache(update)
        await self._setup_account()

        # Act
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)

        # Assert
        _, fill1, fill2, cancel = self.messages
        assert isinstance(fill1, OrderFilled) and fill1.venue_order_id.value == "229430281341"
        assert isinstance(fill2, OrderFilled) and fill2.venue_order_id.value == "229430281339"
        assert isinstance(cancel, OrderCanceled) and cancel.venue_order_id.value == "229430281339"

    @pytest.mark.asyncio
    @pytest.mark.skip(reason="Not implemented")
    async def test_generate_order_status_report(self):
        # Betfair client login
        orders = await self.betfair_client.list_current_orders()
        for order in orders:
            result = await self.client.generate_order_status_report(order=order)
            assert result
        raise NotImplementedError()

    @pytest.mark.asyncio
    @pytest.mark.skip
    async def test_generate_trades_list(self):
        patch(
            "betfairlightweight.endpoints.betting.Betting.list_cleared_orders",
            return_value=BetfairDataProvider.list_cleared_orders(order_id="226125004209"),
        )
        patch.object(
            self.client,
            "venue_order_id_to_client_order_id",
            {"226125004209": ClientOrderId("1")},
        )

        result = await generate_trades_list(
            self=self.client,
            venue_order_id="226125004209",
            symbol=None,
            since=None,
        )
        assert result

    @pytest.mark.asyncio
    async def test_duplicate_execution_id(self):
        # Arrange
        await self._setup_account()
        for update in BetfairStreaming.ocm_DUPLICATE_EXECUTION():
            self._setup_exec_client_and_cache(update)

        # # Load submitted orders
        # for client_order_id in (ClientOrderId('0'), ClientOrderId('1')):
        #     order = BetfairTestStubs.make_order(
        #         price=Price.from_str("0.5"),
        #         quantity=Quantity.from_int(10),
        #         client_order_id=client_order_id,
        #     )
        #     command = BetfairTestStubs.submit_order_command(order=order)
        #     self.client.submit_order(command)
        #     await asyncio.sleep(0)

        # Act
        for update in BetfairStreaming.ocm_DUPLICATE_EXECUTION():
            self._setup_exec_client_and_cache(update=update)
            await self.client._handle_order_stream_update(update=update)
            await asyncio.sleep(0)

        # Assert
        _, fill1, cancel, fill2, fill3 = self.messages
        # First order example: partial fill followed by remainder canceled
        assert isinstance(fill1, OrderFilled)
        assert isinstance(cancel, OrderCanceled)
        # Second order example: partial fill followed by remainder filled
        assert (
            isinstance(fill2, OrderFilled)
            and fill2.execution_id.value == "4721ad7594e7a4a4dffb1bacb0cb45ccdec0747a"
        )
        assert (
            isinstance(fill3, OrderFilled)
            and fill3.execution_id.value == "8b3e65be779968a3fdf2d72731c848c5153e88cd"
        )

    @pytest.mark.asyncio
    async def test_betfair_order_reduces_balance(self):
        # Arrange
        self.client.stream = MagicMock()
        self.exec_engine.start()
        await asyncio.sleep(1)

        balance = self.cache.account_for_venue(self.venue).balances()[GBP]
        order = BetfairTestStubs.make_order(
            price=Price.from_str("0.5"),
            quantity=Quantity.from_int(10),
        )
        self.cache.add_order(order=order, position_id=None)
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_place_order_success())
        command = BetfairTestStubs.submit_order_command(order=order)
        self.client.submit_order(command)
        await asyncio.sleep(0.01)

        # Act
        balance_order = self.cache.account_for_venue(BETFAIR_VENUE).balances()[GBP]

        # Cancel the order, balance should return
        command = BetfairTestStubs.cancel_order_command(
            client_order_id=order.client_order_id,
            venue_order_id=order.venue_order_id,
        )
        mock_betfair_request(self.betfair_client, BetfairResponses.betting_cancel_orders_success())
        self.client.cancel_order(command)
        await asyncio.sleep(0.1)
        balance_cancel = self.cache.account_for_venue(BETFAIR_VENUE).balances()[GBP]

        # Assert
        assert balance.free == Money(1000.0, GBP)
        assert balance_order.free == Money(990.0, GBP)
        assert balance_cancel.free == Money(1000.0, GBP)

        self.exec_engine.kill()
        await asyncio.sleep(1)

    @pytest.mark.asyncio
    async def test_betfair_order_cancelled_no_timestamp(self):
        update = BetfairStreaming.ocm_error_fill()
        self._setup_exec_client_and_cache(update)
        for upd in update["oc"][0]["orc"][0]["uo"]:
            self.client._handle_stream_execution_complete_order_update(update=upd)
            await asyncio.sleep(1)
    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "price,size,side,status,updates",
        [
            ("1.50", "50", "B", "EC", [{"sm": 50}]),
            ("1.50", "50", "B", "E", [{"sm": 10}, {"sm": 15}]),
        ],
    )
    async def test_various_betfair_order_fill_scenarios(self, price, size, side, status, updates):
        # Arrange
        update = BetfairStreaming.ocm_filled_different_price()
        self._setup_exec_client_and_cache(update)
        await self._setup_account()

        # Act
        for raw in updates:
            update = BetfairStreaming.generate_order_update(
                price=price,
                size=size,
                side=side,
                status=status,
                **raw,
            )
            await self.client._handle_order_stream_update(update=update)
            await asyncio.sleep(0)

        # Assert
        assert len(self.messages) == 1 + len(updates)
        for msg, raw in zip(self.messages[1:], updates):
            assert isinstance(msg, OrderFilled)
            assert msg.last_qty == raw["sm"]

    @pytest.mark.asyncio
    async def test_order_filled_avp_update(self):
        # Arrange
        update = BetfairStreaming.ocm_filled_different_price()
        self._setup_exec_client_and_cache(update)
        await self._setup_account()

        # Act
        update = BetfairStreaming.generate_order_update(
            price="1.50",
            size=20,
            side="B",
            status="E",
            avp="1.50",
            sm=10,
        )
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)

        update = BetfairStreaming.generate_order_update(
            price="1.30",
            size=20,
            side="B",
            status="E",
            avp="1.50",
            sm=10,
        )
        await self.client._handle_order_stream_update(update=update)
        await asyncio.sleep(0)
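The fill assertions in the stream tests above compare prices in probability space: the adapter maps Betfair decimal odds into probabilities via price_to_probability, which is why a fill at odds 1.10 is asserted as last_px == 0.9090909. A minimal standalone sketch of that relationship; the implied_probability helper here is illustrative and is not the adapter's actual price_to_probability implementation:

from decimal import Decimal


def implied_probability(decimal_odds: str) -> Decimal:
    """Convert Betfair decimal odds into an implied probability (1 / odds)."""
    return Decimal(1) / Decimal(decimal_odds)


# 1 / 1.10 == 0.909090..., matching the Price asserted in test_order_stream_filled
assert implied_probability("1.10").quantize(Decimal("0.0000001")) == Decimal("0.9090909")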
class TradingNode:
    """
    Provides an asynchronous network node for live trading.

    Parameters
    ----------
    config : TradingNodeConfig, optional
        The configuration for the instance.

    Raises
    ------
    TypeError
        If `config` is not of type `TradingNodeConfig`.
    """

    def __init__(self, config: Optional[TradingNodeConfig] = None):
        if config is None:
            config = TradingNodeConfig()
        PyCondition.not_none(config, "config")
        PyCondition.type(config, TradingNodeConfig, "config")

        # Configuration
        self._config = config

        # Setup loop
        self._loop = asyncio.get_event_loop()
        self._executor = concurrent.futures.ThreadPoolExecutor()
        self._loop.set_default_executor(self._executor)
        self._loop.set_debug(config.loop_debug)

        # Components
        self._clock = LiveClock(loop=self._loop)
        self._uuid_factory = UUIDFactory()
        self.created_time = self._clock.utc_now()
        self._is_running = False

        # Identifiers
        self.trader_id = TraderId(config.trader_id)
        self.machine_id = socket.gethostname()
        self.instance_id = self._uuid_factory.generate()

        # Setup logging
        self._logger = LiveLogger(
            loop=self._loop,
            clock=self._clock,
            trader_id=self.trader_id,
            machine_id=self.machine_id,
            instance_id=self.instance_id,
            level_stdout=LogLevelParser.from_str_py(config.log_level.upper()),
        )

        self._log = LoggerAdapter(
            component_name=type(self).__name__,
            logger=self._logger,
        )

        self._log_header()
        self._log.info("Building...")

        if platform.system() != "Windows":
            # Windows does not support signal handling
            # https://stackoverflow.com/questions/45987985/asyncio-loops-add-signal-handler-in-windows
            self._setup_loop()

        ########################################################################
        # Build platform
        ########################################################################
        if config.cache_database is None or config.cache_database.type == "in-memory":
            cache_db = None
        elif config.cache_database.type == "redis":
            cache_db = RedisCacheDatabase(
                trader_id=self.trader_id,
                logger=self._logger,
                serializer=MsgPackSerializer(timestamps_as_str=True),
                config=config.cache_database,
            )
        else:  # pragma: no cover (design-time error)
            raise ValueError(
                "The cache_database.type in the configuration is unrecognized, "
                "must be one of {'in-memory', 'redis'}.",
            )

        self._msgbus = MessageBus(
            trader_id=self.trader_id,
            clock=self._clock,
            logger=self._logger,
        )

        self._cache = Cache(
            database=cache_db,
            logger=self._logger,
            config=config.cache,
        )

        self.portfolio = Portfolio(
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
        )

        self._data_engine = LiveDataEngine(
            loop=self._loop,
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
            config=config.data_engine,
        )

        self._exec_engine = LiveExecutionEngine(
            loop=self._loop,
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
            config=config.exec_engine,
        )
        self._exec_engine.load_cache()

        self._risk_engine = LiveRiskEngine(
            loop=self._loop,
            portfolio=self.portfolio,
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
            config=config.risk_engine,
        )

        self.trader = Trader(
            trader_id=self.trader_id,
            msgbus=self._msgbus,
            cache=self._cache,
            portfolio=self.portfolio,
            data_engine=self._data_engine,
            risk_engine=self._risk_engine,
            exec_engine=self._exec_engine,
            clock=self._clock,
            logger=self._logger,
        )

        if config.load_strategy_state:
            self.trader.load()

        # Setup persistence (requires trader)
        self.persistence_writers: List[Any] = []
        if config.persistence:
            self._setup_persistence(config=config.persistence)

        self._builder = TradingNodeBuilder(
            loop=self._loop,
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
            log=self._log,
        )

        self._log.info("INITIALIZED.")
        self.time_to_initialize = self._clock.delta(self.created_time)
        self._log.info(f"Initialized in {int(self.time_to_initialize.total_seconds() * 1000)}ms.")

        self._is_built = False

    @property
    def is_running(self) -> bool:
        """
        If the trading node is running.

        Returns
        -------
        bool

        """
        return self._is_running

    @property
    def is_built(self) -> bool:
        """
        If the trading node's clients are built.

        Returns
        -------
        bool

        """
        return self._is_built

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        """
        Return the event loop of the trading node.

        Returns
        -------
        asyncio.AbstractEventLoop

        """
        return self._loop

    def get_logger(self) -> LiveLogger:
        """
        Return the logger for the trading node.

        Returns
        -------
        LiveLogger

        """
        return self._logger

    def add_log_sink(self, handler: Callable[[Dict], None]) -> None:
        """
        Register the given sink handler with the node's logger.

        Parameters
        ----------
        handler : Callable[[Dict], None]
            The sink handler to register.

        Raises
        ------
        KeyError
            If `handler` is already registered.

        """
        self._logger.register_sink(handler=handler)

    def add_data_client_factory(self, name: str, factory) -> None:
        """
        Add the given data client factory to the node.

        Parameters
        ----------
        name : str
            The name of the client factory.
        factory : LiveDataClientFactory
            The factory to add.

        Raises
        ------
        ValueError
            If `name` is not a valid string.
        KeyError
            If `name` has already been added.

        """
        self._builder.add_data_client_factory(name, factory)

    def add_exec_client_factory(self, name: str, factory) -> None:
        """
        Add the given execution client factory to the node.

        Parameters
        ----------
        name : str
            The name of the client factory.
        factory : LiveExecutionClientFactory
            The factory to add.

        Raises
        ------
        ValueError
            If `name` is not a valid string.
        KeyError
            If `name` has already been added.

        """
        self._builder.add_exec_client_factory(name, factory)

    def build(self) -> None:
        """
        Build the node's clients.

        Raises
        ------
        RuntimeError
            If the clients are already built.

        """
        if self._is_built:
            raise RuntimeError("The trading node's clients are already built.")

        self._builder.build_data_clients(self._config.data_clients)
        self._builder.build_exec_clients(self._config.exec_clients)
        self._is_built = True

    def start(self) -> Optional[asyncio.Task]:
        """
        Start the trading node.

        Returns
        -------
        asyncio.Task or None
            The run task if the event loop was already running, otherwise None.

        Raises
        ------
        RuntimeError
            If the node's clients have not been built.

        """
        if not self._is_built:
            raise RuntimeError(
                "The trading node's clients have not been built. "
                "Please run `node.build()` prior to start."
            )

        try:
            if self._loop.is_running():
                return self._loop.create_task(self._run())
            else:
                self._loop.run_until_complete(self._run())
                return None
        except RuntimeError as ex:
            self._log.exception("Error on run", ex)
            return None

    def stop(self) -> None:
        """
        Stop the trading node gracefully.

        After the configured delay the internal `Trader` residuals will be checked.

        If `save_strategy_state` is configured, strategy states will then be saved.

        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._stop())
            else:
                self._loop.run_until_complete(self._stop())
        except RuntimeError as ex:
            self._log.exception("Error on stop", ex)

    def dispose(self) -> None:
        """
        Dispose of the trading node.

        Gracefully shuts down the executor and event loop.

        """
        try:
            timeout = self._clock.utc_now() + timedelta(seconds=self._config.timeout_disconnection)
            while self._is_running:
                time.sleep(0.1)
                if self._clock.utc_now() >= timeout:
                    self._log.warning(
                        f"Timed out ({self._config.timeout_disconnection}s) waiting for node to stop."
f"\nStatus" f"\n------" f"\nDataEngine.check_disconnected() == {self._data_engine.check_disconnected()}" f"\nExecEngine.check_disconnected() == {self._exec_engine.check_disconnected()}" ) break self._log.info("DISPOSING...") self._log.debug(f"{self._data_engine.get_run_queue_task()}") self._log.debug(f"{self._exec_engine.get_run_queue_task()}") self._log.debug(f"{self._risk_engine.get_run_queue_task()}") self.trader.dispose() self._data_engine.dispose() self._exec_engine.dispose() self._risk_engine.dispose() self._log.info("Shutting down executor...") if sys.version_info >= (3, 9): # cancel_futures added in Python 3.9 self._executor.shutdown(wait=True, cancel_futures=True) else: self._executor.shutdown(wait=True) self._log.info("Stopping event loop...") self._cancel_all_tasks() self._loop.stop() except RuntimeError as ex: self._log.exception("Error on dispose", ex) finally: if self._loop.is_running(): self._log.warning("Cannot close a running event loop.") else: self._log.info("Closing event loop...") self._loop.close() # Check and log if event loop is running if self._loop.is_running(): self._log.warning(f"loop.is_running={self._loop.is_running()}") else: self._log.info(f"loop.is_running={self._loop.is_running()}") # Check and log if event loop is closed if not self._loop.is_closed(): self._log.warning(f"loop.is_closed={self._loop.is_closed()}") else: self._log.info(f"loop.is_closed={self._loop.is_closed()}") self._log.info("DISPOSED.") def _log_header(self) -> None: nautilus_header(self._log) self._log.info(f"redis {redis.__version__}") # type: ignore self._log.info(f"msgpack {msgpack.version[0]}.{msgpack.version[1]}.{msgpack.version[2]}") if uvloop_version: self._log.info(f"uvloop {uvloop_version}") self._log.info("\033[36m=================================================================") def _setup_loop(self) -> None: if self._loop.is_closed(): self._log.error("Cannot setup signal handling (event loop was closed).") return signal.signal(signal.SIGINT, signal.SIG_DFL) signals = (signal.SIGTERM, signal.SIGINT, signal.SIGABRT) for sig in signals: self._loop.add_signal_handler(sig, self._loop_sig_handler, sig) self._log.debug(f"Event loop signal handling setup for {signals}.") def _setup_persistence(self, config: PersistenceConfig) -> None: # Setup persistence path = f"{config.catalog_path}/live/{self.instance_id}.feather" writer = FeatherWriter( path=path, fs_protocol=config.fs_protocol, flush_interval=config.flush_interval, ) self.persistence_writers.append(writer) self.trader.subscribe("*", writer.write) self._log.info(f"Persisting data & events to {path=}") # Setup logging if config.persist_logs: def sink(record, f): f.write(orjson.dumps(record) + b"\n") path = f"{config.catalog_path}/logs/{self.instance_id}.log" log_sink = open(path, "wb") self.persistence_writers.append(log_sink) self._logger.register_sink(partial(sink, f=log_sink)) self._log.info(f"Persisting logs to {path=}") def _loop_sig_handler(self, sig) -> None: self._loop.remove_signal_handler(signal.SIGTERM) self._loop.add_signal_handler(signal.SIGINT, lambda: None) self._log.warning(f"Received {sig!s}, shutting down...") self.stop() async def _run(self) -> None: try: self._log.info("STARTING...") self._is_running = True # Start system self._logger.start() self._data_engine.start() self._exec_engine.start() self._risk_engine.start() # Connect all clients self._data_engine.connect() self._exec_engine.connect() # Await engine connection and initialization self._log.info( f"Waiting for engines to connect and initialize " 
f"({self._config.timeout_connection}s timeout)...", color=LogColor.BLUE, ) if not await self._await_engines_connected(): self._log.warning( f"Timed out ({self._config.timeout_connection}s) waiting for engines to connect and initialize." f"\nStatus" f"\n------" f"\nDataEngine.check_connected() == {self._data_engine.check_connected()}" f"\nExecEngine.check_connected() == {self._exec_engine.check_connected()}" ) return self._log.info("Engines connected.", color=LogColor.GREEN) # Await execution state reconciliation self._log.info( f"Waiting for execution state to reconcile " f"({self._config.timeout_reconciliation}s timeout)...", color=LogColor.BLUE, ) if not await self._exec_engine.reconcile_state( timeout_secs=self._config.timeout_reconciliation, ): self._log.error("Execution state could not be reconciled.") return self._log.info("State reconciled.", color=LogColor.GREEN) # Initialize portfolio self.portfolio.initialize_orders() self.portfolio.initialize_positions() # Await portfolio initialization self._log.info( "Waiting for portfolio to initialize " f"({self._config.timeout_portfolio}s timeout)...", color=LogColor.BLUE, ) if not await self._await_portfolio_initialized(): self._log.warning( f"Timed out ({self._config.timeout_portfolio}s) waiting for portfolio to initialize." f"\nStatus" f"\n------" f"\nPortfolio.initialized == {self.portfolio.initialized}" ) return self._log.info("Portfolio initialized.", color=LogColor.GREEN) # Start trader and strategies self.trader.start() if self._loop.is_running(): self._log.info("RUNNING.") else: self._log.warning("Event loop is not running.") # Continue to run while engines are running... await self._data_engine.get_run_queue_task() await self._exec_engine.get_run_queue_task() await self._risk_engine.get_run_queue_task() except asyncio.CancelledError as ex: self._log.error(str(ex)) async def _await_engines_connected(self) -> bool: # - The data engine clients will be set connected when all # instruments are received and updated with the data engine. # - The execution engine clients will be set connected when all # accounts are updated and the current order and position status is # reconciled. # Thus any delay here will be due to blocking network I/O. seconds = self._config.timeout_connection timeout: timedelta = self._clock.utc_now() + timedelta(seconds=seconds) while True: await asyncio.sleep(0) if self._clock.utc_now() >= timeout: return False if not self._data_engine.check_connected(): continue if not self._exec_engine.check_connected(): continue break return True # Engines connected async def _await_portfolio_initialized(self) -> bool: # - The portfolio will be set initialized when all margin and unrealized # PnL calculations are completed (maybe waiting on first quotes). # Thus any delay here will be due to blocking network I/O. 
        seconds = self._config.timeout_portfolio
        timeout = self._clock.utc_now() + timedelta(seconds=seconds)
        while True:
            await asyncio.sleep(0)
            if self._clock.utc_now() >= timeout:
                return False
            if not self.portfolio.initialized:
                continue
            break

        return True  # Portfolio initialized

    async def _stop(self) -> None:
        self._is_stopping = True
        self._log.info("STOPPING...")

        if self.trader.is_running:
            self.trader.stop()
            self._log.info(
                f"Awaiting residual state ({self._config.check_residuals_delay}s delay)...",
                color=LogColor.BLUE,
            )
            await asyncio.sleep(self._config.check_residuals_delay)
            self.trader.check_residuals()

        if self._config.save_strategy_state:
            self.trader.save()

        # Disconnect all clients
        self._data_engine.disconnect()
        self._exec_engine.disconnect()

        if self._data_engine.is_running:
            self._data_engine.stop()
        if self._exec_engine.is_running:
            self._exec_engine.stop()
        if self._risk_engine.is_running:
            self._risk_engine.stop()

        self._log.info(
            f"Waiting for engines to disconnect "
            f"({self._config.timeout_disconnection}s timeout)...",
            color=LogColor.BLUE,
        )
        if not await self._await_engines_disconnected():
            self._log.error(
                f"Timed out ({self._config.timeout_disconnection}s) waiting for engines to disconnect."
                f"\nStatus"
                f"\n------"
                f"\nDataEngine.check_disconnected() == {self._data_engine.check_disconnected()}"
                f"\nExecEngine.check_disconnected() == {self._exec_engine.check_disconnected()}"
            )

        # Clean up remaining timers
        timer_names = self._clock.timer_names()
        self._clock.cancel_timers()
        for name in timer_names:
            self._log.info(f"Cancelled Timer(name={name}).")

        # Clean up persistence
        for writer in self.persistence_writers:
            writer.close()

        self._log.info("STOPPED.")
        self._logger.stop()
        self._is_running = False

    async def _await_engines_disconnected(self) -> bool:
        seconds = self._config.timeout_disconnection
        timeout = self._clock.utc_now() + timedelta(seconds=seconds)
        while True:
            await asyncio.sleep(0)
            if self._clock.utc_now() >= timeout:
                return False
            if not self._data_engine.check_disconnected():
                continue
            if not self._exec_engine.check_disconnected():
                continue
            break

        return True  # Engines disconnected

    def _cancel_all_tasks(self) -> None:
        to_cancel = asyncio.tasks.all_tasks(self._loop)
        if not to_cancel:
            self._log.info("All tasks canceled.")
            return

        for task in to_cancel:
            self._log.warning(f"Canceling pending task {task}")
            task.cancel()

        if self._loop.is_running():
            self._log.warning("Event loop still running during `cancel_all_tasks`.")
            return

        finish_all_tasks: asyncio.Future = asyncio.tasks.gather(  # type: ignore
            *to_cancel,
            loop=self._loop,
            return_exceptions=True,
        )
        self._loop.run_until_complete(finish_all_tasks)

        self._log.debug(f"{finish_all_tasks}")

        for task in to_cancel:  # pragma: no cover
            if task.cancelled():
                continue
            if task.exception() is not None:
                self._loop.call_exception_handler(
                    {
                        "message": "unhandled exception during asyncio.run() shutdown",
                        "exception": task.exception(),
                        "task": task,
                    }
                )
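The _await_engines_connected, _await_portfolio_initialized, and _await_engines_disconnected helpers above all follow the same poll-until-timeout pattern: yield to the event loop, check the deadline, then re-check the predicate. A generic standalone sketch of that pattern, using the standard library clock rather than the node's LiveClock (the await_condition name is illustrative, not part of the codebase):

import asyncio
from datetime import datetime, timedelta, timezone
from typing import Callable


async def await_condition(predicate: Callable[[], bool], timeout_secs: float) -> bool:
    """Poll `predicate` until it returns True, or return False on timeout."""
    deadline = datetime.now(timezone.utc) + timedelta(seconds=timeout_secs)
    while not predicate():
        if datetime.now(timezone.utc) >= deadline:
            return False
        await asyncio.sleep(0)  # yield control so pending I/O callbacks can run
    return True

Note that asyncio.sleep(0) yields without delay, so the loop effectively spins on the event loop; this is tolerable here because, as the source comments note, any delay is expected to come from blocking network I/O that resolves quickly.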
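Putting the public API together, a typical embedding of TradingNode registers client factories, builds, starts, and tears down in order. A minimal sketch, assuming the import paths below match your installed version of the package (they have moved between releases) and with the Betfair factory names left as commented placeholders:

# Import paths are assumptions and may differ between package versions
from nautilus_trader.live.config import TradingNodeConfig
from nautilus_trader.live.node import TradingNode

config = TradingNodeConfig(trader_id="TESTER-001")
node = TradingNode(config=config)

# Client factories must be registered before build(), e.g. (hypothetical names):
# node.add_data_client_factory("BETFAIR", BetfairLiveDataClientFactory)
# node.add_exec_client_factory("BETFAIR", BetfairLiveExecutionClientFactory)

node.build()  # start() raises RuntimeError if this step is skipped
try:
    node.start()  # blocks in run_until_complete() unless the loop is already running
finally:
    node.stop()     # graceful stop: residual check, optional strategy state save
    node.dispose()  # shuts down the executor and closes the event loop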