def setUp(self):
    # Fixture Setup
    usdjpy = TestStubs.instrument_usdjpy()
    data = BacktestDataContainer()
    data.add_instrument(usdjpy)
    data.add_bars(usdjpy.symbol, BarStructure.MINUTE, PriceType.BID, TestDataProvider.usdjpy_1min_bid()[:2000])
    data.add_bars(usdjpy.symbol, BarStructure.MINUTE, PriceType.ASK, TestDataProvider.usdjpy_1min_ask()[:2000])

    clock = TestClock()
    guid_factory = TestGuidFactory()
    logger = TestLogger()
    trader_id = TraderId('TESTER', '000')
    account_id = TestStubs.account_id()

    data_client = BacktestDataClient(data=data, tick_capacity=100, clock=clock, logger=logger)

    self.portfolio = Portfolio(currency=Currency.USD, clock=clock, guid_factory=guid_factory, logger=logger)
    self.analyzer = PerformanceAnalyzer()

    self.exec_db = InMemoryExecutionDatabase(trader_id=trader_id, logger=logger)
    self.exec_engine = ExecutionEngine(
        trader_id=trader_id,
        account_id=account_id,
        database=self.exec_db,
        portfolio=self.portfolio,
        clock=clock,
        guid_factory=guid_factory,
        logger=logger,
    )

    self.exec_client = BacktestExecClient(
        exec_engine=self.exec_engine,
        instruments={usdjpy.symbol: usdjpy},
        config=BacktestConfig(),
        fill_model=FillModel(),
        clock=clock,
        guid_factory=guid_factory,
        logger=logger,
    )
    self.exec_engine.register_client(self.exec_client)

    strategies = [EmptyStrategy('001'), EmptyStrategy('002')]

    self.trader = Trader(
        trader_id=trader_id,
        account_id=account_id,
        strategies=strategies,
        data_client=data_client,
        exec_engine=self.exec_engine,
        clock=clock,
        guid_factory=guid_factory,
        logger=logger,
    )
class TradingNode:
    """
    Provides an asynchronous network node for live trading.
    """

    def __init__(
        self,
        strategies: List[TradingStrategy],
        config: Dict[str, object],
    ):
        """
        Initialize a new instance of the TradingNode class.

        Parameters
        ----------
        strategies : list[TradingStrategy]
            The list of strategies to run on the trading node.
        config : dict[str, object]
            The configuration for the trading node.

        """
        if strategies is None:
            strategies = []

        config_trader = config.get("trader", {})
        config_log = config.get("logging", {})
        config_exec_db = config.get("exec_database", {})
        config_strategy = config.get("strategy", {})
        config_data_clients = config.get("data_clients", {})
        config_exec_clients = config.get("exec_clients", {})

        self._clock = LiveClock()
        self._uuid_factory = UUIDFactory()
        self._loop = asyncio.get_event_loop()
        self._executor = concurrent.futures.ThreadPoolExecutor()
        self._loop.set_default_executor(self._executor)
        self._loop.set_debug(False)  # TODO: Development
        self._is_running = False

        # Setup identifiers
        self.trader_id = TraderId(
            name=config_trader["name"],
            tag=config_trader["id_tag"],
        )

        # Setup logging
        logger = LiveLogger(
            clock=self._clock,
            name=self.trader_id.value,
            level_console=LogLevelParser.from_str_py(config_log.get("log_level_console")),
            level_file=LogLevelParser.from_str_py(config_log.get("log_level_file")),
            level_store=LogLevelParser.from_str_py(config_log.get("log_level_store")),
            log_thread=config_log.get("log_thread_id", True),
            log_to_file=config_log.get("log_to_file", False),
            log_file_path=config_log.get("log_file_path", ""),
        )

        self._log = LoggerAdapter(component_name=self.__class__.__name__, logger=logger)
        self._log_header()
        self._log.info("Building...")

        self.portfolio = Portfolio(clock=self._clock, logger=logger)

        self._data_engine = LiveDataEngine(
            loop=self._loop,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=logger,
        )

        self.portfolio.register_cache(self._data_engine.cache)
        self.analyzer = PerformanceAnalyzer()

        if config_exec_db["type"] == "redis":
            exec_db = RedisExecutionDatabase(
                trader_id=self.trader_id,
                logger=logger,
                command_serializer=MsgPackCommandSerializer(),
                event_serializer=MsgPackEventSerializer(),
                config={
                    "host": config_exec_db["host"],
                    "port": config_exec_db["port"],
                },
            )
        else:
            exec_db = BypassExecutionDatabase(trader_id=self.trader_id, logger=logger)

        self._exec_engine = LiveExecutionEngine(
            loop=self._loop,
            database=exec_db,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=logger,
        )

        self._exec_engine.load_cache()
        self._setup_data_clients(config_data_clients, logger)
        self._setup_exec_clients(config_exec_clients, logger)

        self.trader = Trader(
            trader_id=self.trader_id,
            strategies=strategies,
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            clock=self._clock,
            logger=logger,
        )

        self._check_residuals_delay = 2.0  # Hard coded delay (refactor)
        self._load_strategy_state = config_strategy.get("load_state", True)
        self._save_strategy_state = config_strategy.get("save_state", True)

        if self._load_strategy_state:
            self.trader.load()

        self._setup_loop()
        self._log.info("state=INITIALIZED.")

    def get_event_loop(self):
        """
        Return the event loop of the trading node.

        Returns
        -------
        asyncio.AbstractEventLoop

        """
        return self._loop

    def start(self):
        """
        Start the trading node.
        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._run())
            else:
                self._loop.run_until_complete(self._run())
        except RuntimeError as ex:
            self._log.exception(ex)

    def stop(self):
        """
        Stop the trading node gracefully.

        After a specified delay the internal `Trader` residuals will be checked.
        If save strategy is specified then strategy states will then be saved.

        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._stop())
            else:
                self._loop.run_until_complete(self._stop())
        except RuntimeError as ex:
            self._log.exception(ex)

    # noinspection PyTypeChecker
    # Expected timedelta, got datetime.pyi instead
    def dispose(self):
        """
        Dispose of the trading node.

        Gracefully shuts down the executor and event loop.

        """
        try:
            timeout = self._clock.utc_now() + timedelta(seconds=5)
            while self._is_running:
                time.sleep(0.1)
                if self._clock.utc_now() >= timeout:
                    self._log.warning("Timed out (5s) waiting for node to stop.")
                    break

            self._log.info("state=DISPOSING...")
            self.trader.dispose()
            self._data_engine.dispose()
            self._exec_engine.dispose()

            self._log.info("Shutting down executor...")
            if is_ge_python_version(major=3, minor=9):
                # cancel_futures added in Python 3.9
                self._executor.shutdown(wait=True, cancel_futures=True)
            else:
                self._executor.shutdown(wait=True)

            self._log.info("Stopping event loop...")
            self._loop.stop()
            if is_ge_python_version(major=3, minor=7):
                # tasks.all_tasks added in Python 3.7
                self._cancel_all_tasks()
        except RuntimeError as ex:
            self._log.exception(ex)
        finally:
            if self._loop.is_running():
                self._log.warning("Cannot close a running event loop.")
            else:
                self._log.info("Closing event loop...")
                self._loop.close()

            # Check and log if event loop is running
            if self._loop.is_running():
                self._log.warning(f"loop.is_running={self._loop.is_running()}")
            else:
                self._log.info(f"loop.is_running={self._loop.is_running()}")

            # Check and log if event loop is closed
            if not self._loop.is_closed():
                self._log.warning(f"loop.is_closed={self._loop.is_closed()}")
            else:
                self._log.info(f"loop.is_closed={self._loop.is_closed()}")

            self._log.info("state=DISPOSED.")
            time.sleep(0.1)  # Assist final logging to daemonic logging thread

    def _log_header(self):
        nautilus_header(self._log)
        self._log.info(f"redis {redis.__version__}")
        self._log.info(f"msgpack {msgpack.version[0]}.{msgpack.version[1]}.{msgpack.version[2]}")
        if uvloop_version:
            self._log.info(f"uvloop {uvloop_version}")
        self._log.info("=================================================================")

    def _setup_data_clients(self, config, logger):
        try:
            # Setup each data client
            for name, config in config.items():
                if name == "binance":
                    data_client = BinanceDataClientFactory.create(
                        config=config,
                        data_engine=self._data_engine,
                        clock=self._clock,
                        logger=logger,
                    )
                    self._data_engine.register_client(data_client)
                elif name == "oanda":
                    data_client = OandaDataClientFactory.create(
                        config=config,
                        data_engine=self._data_engine,
                        clock=self._clock,
                        logger=logger,
                    )
                    self._data_engine.register_client(data_client)
                else:
                    self._log.error(f"No DataClient available for `{name}`.")
        except RuntimeError as ex:
            self._log.exception(ex)

    def _setup_exec_clients(self, config, logger):
        try:
            # Setup each execution client
            for name, config in config.items():
                if name == "binance":
                    pass
                elif name == "oanda":
                    pass
                else:
                    self._log.error(f"No ExecutionClient available for `{name}`.")
        except RuntimeError as ex:
            self._log.exception(ex)

    def _setup_loop(self):
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signals = (signal.SIGTERM, signal.SIGINT)
        if self._loop.is_closed():
            self._log.error("Cannot setup signal handling (event loop was closed).")
            return
        for sig in signals:
            self._loop.add_signal_handler(sig, self._loop_sig_handler, sig)
        self._log.debug(f"Event loop {signals} handling setup.")

    def _loop_sig_handler(self, sig):
        self._loop.remove_signal_handler(signal.SIGTERM)
        self._loop.add_signal_handler(signal.SIGINT, lambda: None)
        self._log.warning(f"Received {sig!s}, shutting down...")
        self.stop()

    async def _run(self):
        try:
            self._log.info("state=STARTING...")
            self._data_engine.start()
            self._exec_engine.start()

            # Wait for engines to initialize (will hang if never initialized)
            await self._loop.run_in_executor(None, self._wait_for_engines)

            self.trader.start()

            if self._loop.is_running():
                self._log.info("state=RUNNING.")
            else:
                self._log.warning("Event loop is not running.")

            self._is_running = True

            # Continue to run loop while engines are running...
            await self._data_engine.get_run_queue_task()
            await self._exec_engine.get_run_queue_task()
        except asyncio.exceptions.CancelledError as ex:
            self._log.error(str(ex))

    def _wait_for_engines(self):
        self._log.info("Waiting for engines to initialize...")

        # The engines require that all of their clients are initialized.
        # The data engine clients will be set as initialized when all
        # instruments are received and updated with the data engine.
        # The execution engine clients will be set as initialized when all
        # accounts are updated and the current order and position status is
        # confirmed. Thus any delay here will be due to blocking network IO.
        while True:
            time.sleep(0.1)
            if not self._data_engine.check_initialized():
                continue
            if not self._exec_engine.check_initialized():
                continue
            return True  # Engines initialized

    async def _stop(self):
        self._is_stopping = True
        self._log.info("state=STOPPING...")

        self.trader.stop()

        self._log.info("Awaiting residual state...")
        await asyncio.sleep(self._check_residuals_delay)

        self.trader.check_residuals()

        if self._save_strategy_state:
            self.trader.save()

        self._data_engine.stop()
        self._exec_engine.stop()

        await self._data_engine.get_run_queue_task()
        await self._exec_engine.get_run_queue_task()

        self._log.debug(f"{self._data_engine.get_run_queue_task()}")
        self._log.debug(f"{self._exec_engine.get_run_queue_task()}")

        # Clean up remaining timers
        timer_names = self._clock.timer_names()
        self._clock.cancel_timers()
        for name in timer_names:
            self._log.info(f"Cancelled Timer(name={name}).")

        self._log.info("state=STOPPED.")
        self._is_running = False

    def _cancel_all_tasks(self):
        to_cancel = tasks.all_tasks(self._loop)
        if not to_cancel:
            self._log.info("All tasks finished.")
            return

        for task in to_cancel:
            self._log.warning(f"Cancelling pending task {task}")
            task.cancel()

        if self._loop.is_running():
            self._log.warning("Event loop still running during `cancel_all_tasks`.")
            return

        finish_all_tasks = tasks.gather(*to_cancel, loop=self._loop, return_exceptions=True)
        self._loop.run_until_complete(finish_all_tasks)

        self._log.debug(f"{finish_all_tasks}")

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                self._loop.call_exception_handler({
                    'message': 'unhandled exception during asyncio.run() shutdown',
                    'exception': task.exception(),
                    'task': task,
                })
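A minimal usage sketch for the dict-configured node above. The config keys mirror those read in `__init__`, but the values shown (and the reuse of `EmptyStrategy` from the test fixtures) are illustrative assumptions, not taken from the source.

# Minimal sketch: construct and run the dict-configured TradingNode.
# Assumes EmptyStrategy is importable; config values are illustrative only.
config = {
    "trader": {"name": "TESTER", "id_tag": "001"},
    "logging": {"log_level_console": "INF"},        # assumed level string
    "exec_database": {"type": "in-memory"},         # anything but "redis" bypasses Redis
    "strategy": {"load_state": True, "save_state": True},
    "data_clients": {},
    "exec_clients": {},
}

if __name__ == "__main__":
    node = TradingNode(strategies=[EmptyStrategy("001")], config=config)
    try:
        node.start()  # Blocks until stopped, e.g. via the SIGINT/SIGTERM handlers set up above
    finally:
        node.dispose()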
class TraderTests(unittest.TestCase):

    def setUp(self):
        # Fixture Setup
        clock = TestClock()
        logger = Logger(clock)

        trader_id = TraderId("TESTER", "000")
        account_id = TestStubs.account_id()

        self.portfolio = Portfolio(clock=clock, logger=logger)

        self.data_engine = DataEngine(
            portfolio=self.portfolio,
            clock=clock,
            logger=logger,
            config={"use_previous_close": False},
        )
        self.portfolio.register_cache(self.data_engine.cache)

        self.analyzer = PerformanceAnalyzer()

        self.exec_db = BypassExecutionDatabase(trader_id=trader_id, logger=logger)
        self.exec_engine = ExecutionEngine(
            database=self.exec_db,
            portfolio=self.portfolio,
            clock=clock,
            logger=logger,
        )

        self.exchange = SimulatedExchange(
            venue=Venue("SIM"),
            oms_type=OMSType.HEDGING,
            is_frozen_account=False,
            starting_balances=[Money(1_000_000, USD)],
            exec_cache=self.exec_engine.cache,
            instruments=[USDJPY_SIM],
            modules=[],
            fill_model=FillModel(),
            clock=clock,
            logger=logger,
        )

        self.data_client = BacktestMarketDataClient(
            instruments=[USDJPY_SIM],
            client_id=ClientId("SIM"),
            engine=self.data_engine,
            clock=clock,
            logger=logger,
        )
        self.data_engine.register_client(self.data_client)

        self.exec_client = BacktestExecClient(
            exchange=self.exchange,
            account_id=account_id,
            engine=self.exec_engine,
            clock=clock,
            logger=logger,
        )

        self.risk_engine = RiskEngine(
            exec_engine=self.exec_engine,
            portfolio=self.portfolio,
            clock=clock,
            logger=logger,
        )

        self.exec_engine.register_risk_engine(self.risk_engine)
        self.exec_engine.register_client(self.exec_client)

        strategies = [
            TradingStrategy("001"),
            TradingStrategy("002"),
        ]

        self.trader = Trader(
            trader_id=trader_id,
            strategies=strategies,
            portfolio=self.portfolio,
            data_engine=self.data_engine,
            exec_engine=self.exec_engine,
            risk_engine=self.risk_engine,
            clock=clock,
            logger=logger,
        )

    def test_initialize_trader(self):
        # Arrange
        # Act
        trader_id = self.trader.id

        # Assert
        self.assertEqual(TraderId("TESTER", "000"), trader_id)
        self.assertEqual(IdTag("000"), trader_id.tag)
        self.assertEqual(ComponentState.INITIALIZED, self.trader.state)
        self.assertEqual(2, len(self.trader.strategy_states()))

    def test_get_strategy_states(self):
        # Arrange
        # Act
        status = self.trader.strategy_states()

        # Assert
        self.assertTrue(StrategyId("TradingStrategy", "001") in status)
        self.assertTrue(StrategyId("TradingStrategy", "002") in status)
        self.assertEqual("INITIALIZED", status[StrategyId("TradingStrategy", "001")])
        self.assertEqual("INITIALIZED", status[StrategyId("TradingStrategy", "002")])
        self.assertEqual(2, len(status))

    def test_change_strategies(self):
        # Arrange
        strategies = [
            TradingStrategy("003"),
            TradingStrategy("004"),
        ]

        # Act
        self.trader.initialize_strategies(strategies, warn_no_strategies=True)

        # Assert
        self.assertTrue(strategies[0].id in self.trader.strategy_states())
        self.assertTrue(strategies[1].id in self.trader.strategy_states())
        self.assertEqual(2, len(self.trader.strategy_states()))

    def test_trader_detects_duplicate_identifiers(self):
        # Arrange
        strategies = [
            TradingStrategy("000"),
            TradingStrategy("000"),
        ]

        # Act
        self.assertRaises(
            ValueError,
            self.trader.initialize_strategies,
            strategies,
            True,
        )

    def test_start_a_trader(self):
        # Arrange
        # Act
        self.trader.start()

        strategy_states = self.trader.strategy_states()

        # Assert
        self.assertEqual(ComponentState.RUNNING, self.trader.state)
        self.assertEqual("RUNNING", strategy_states[StrategyId("TradingStrategy", "001")])
        self.assertEqual("RUNNING", strategy_states[StrategyId("TradingStrategy", "002")])

    def test_stop_a_running_trader(self):
        # Arrange
        self.trader.start()

        # Act
        self.trader.stop()

        strategy_states = self.trader.strategy_states()

        # Assert
        self.assertEqual(ComponentState.STOPPED, self.trader.state)
        self.assertEqual("STOPPED", strategy_states[StrategyId("TradingStrategy", "001")])
        self.assertEqual("STOPPED", strategy_states[StrategyId("TradingStrategy", "002")])
class TradingNode:
    """
    Provides an asynchronous network node for live trading.
    """

    def __init__(
        self,
        strategies: List[TradingStrategy],
        config: Dict[str, object],
    ):
        """
        Initialize a new instance of the TradingNode class.

        Parameters
        ----------
        strategies : list[TradingStrategy]
            The list of strategies to run on the trading node.
        config : dict[str, object]
            The configuration for the trading node.

        Raises
        ------
        ValueError
            If strategies is None or empty.
        ValueError
            If config is None or empty.

        """
        PyCondition.not_none(strategies, "strategies")
        PyCondition.not_none(config, "config")
        PyCondition.not_empty(strategies, "strategies")
        PyCondition.not_empty(config, "config")

        self._config = config

        # Extract configs
        config_trader = config.get("trader", {})
        config_system = config.get("system", {})
        config_log = config.get("logging", {})
        config_exec_db = config.get("exec_database", {})
        config_risk = config.get("risk", {})
        config_strategy = config.get("strategy", {})

        # System config
        self._connection_timeout = config_system.get("connection_timeout", 5.0)
        self._disconnection_timeout = config_system.get("disconnection_timeout", 5.0)
        self._check_residuals_delay = config_system.get("check_residuals_delay", 5.0)
        self._load_strategy_state = config_strategy.get("load_state", True)
        self._save_strategy_state = config_strategy.get("save_state", True)

        # Setup loop
        self._loop = asyncio.get_event_loop()
        self._executor = concurrent.futures.ThreadPoolExecutor()
        self._loop.set_default_executor(self._executor)
        self._loop.set_debug(config_system.get("loop_debug", False))

        # Components
        self._clock = LiveClock(loop=self._loop)
        self._uuid_factory = UUIDFactory()
        self.system_id = self._uuid_factory.generate()
        self.created_time = self._clock.utc_now()
        self._is_running = False

        # Setup identifiers
        self.trader_id = TraderId(
            name=config_trader["name"],
            tag=config_trader["id_tag"],
        )

        # Setup logging
        level_stdout = LogLevelParser.from_str_py(config_log.get("level_stdout"))

        self._logger = LiveLogger(
            loop=self._loop,
            clock=self._clock,
            trader_id=self.trader_id,
            system_id=self.system_id,
            level_stdout=level_stdout,
        )

        self._log = LoggerAdapter(
            component=self.__class__.__name__,
            logger=self._logger,
        )

        self._log_header()
        self._log.info("Building...")

        if platform.system() != "Windows":
            # Requires the logger to be initialized
            # Windows does not support signal handling
            # https://stackoverflow.com/questions/45987985/asyncio-loops-add-signal-handler-in-windows
            self._setup_loop()

        # Build platform
        # ----------------------------------------------------------------------
        self.portfolio = Portfolio(clock=self._clock, logger=self._logger)

        self._data_engine = LiveDataEngine(
            loop=self._loop,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=self._logger,
            config={"qsize": 10000},
        )

        self.portfolio.register_cache(self._data_engine.cache)
        self.analyzer = PerformanceAnalyzer()

        if config_exec_db["type"] == "redis":
            exec_db = RedisExecutionDatabase(
                trader_id=self.trader_id,
                logger=self._logger,
                command_serializer=MsgPackCommandSerializer(),
                event_serializer=MsgPackEventSerializer(),
                config={
                    "host": config_exec_db["host"],
                    "port": config_exec_db["port"],
                },
            )
        else:
            exec_db = BypassExecutionDatabase(trader_id=self.trader_id, logger=self._logger)

        self._exec_engine = LiveExecutionEngine(
            loop=self._loop,
            database=exec_db,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=self._logger,
            config={"qsize": 10000},
        )

        self._risk_engine = LiveRiskEngine(
            loop=self._loop,
            exec_engine=self._exec_engine,
            portfolio=self.portfolio,
            clock=self._clock,
            logger=self._logger,
            config=config_risk,
        )

        self._exec_engine.load_cache()
        self._exec_engine.register_risk_engine(self._risk_engine)

        self.trader = Trader(
            trader_id=self.trader_id,
            strategies=strategies,
            portfolio=self.portfolio,
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            risk_engine=self._risk_engine,
            clock=self._clock,
            logger=self._logger,
        )

        if self._load_strategy_state:
            self.trader.load()

        self._builder = TradingNodeBuilder(
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            risk_engine=self._risk_engine,
            clock=self._clock,
            logger=self._logger,
            log=self._log,
        )

        self._log.info("state=INITIALIZED.")
        self.time_to_initialize = self._clock.delta(self.created_time)
        self._log.info(f"Initialized in {self.time_to_initialize.total_seconds():.3f}s.")

        self._is_built = False

    @property
    def is_running(self) -> bool:
        """
        If the trading node is running.

        Returns
        -------
        bool
            True if running, else False.

        """
        return self._is_running

    @property
    def is_built(self) -> bool:
        """
        If the trading node clients are built.

        Returns
        -------
        bool
            True if built, else False.

        """
        return self._is_built

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        """
        Return the event loop of the trading node.

        Returns
        -------
        asyncio.AbstractEventLoop

        """
        return self._loop

    def get_logger(self) -> LiveLogger:
        """
        Return the logger for the trading node.

        Returns
        -------
        LiveLogger

        """
        return self._logger

    def add_data_client_factory(self, name, factory):
        """
        Add the given data client factory to the node.

        Parameters
        ----------
        name : str
            The name of the client factory.
        factory : LiveDataClientFactory or LiveExecutionClientFactory
            The factory to add.

        Raises
        ------
        ValueError
            If name is not a valid string.
        KeyError
            If name has already been added.

        """
        self._builder.add_data_client_factory(name, factory)

    def add_exec_client_factory(self, name, factory):
        """
        Add the given execution client factory to the node.

        Parameters
        ----------
        name : str
            The name of the client factory.
        factory : LiveDataClientFactory or LiveExecutionClientFactory
            The factory to add.

        Raises
        ------
        ValueError
            If name is not a valid string.
        KeyError
            If name has already been added.

        """
        self._builder.add_exec_client_factory(name, factory)

    def build(self) -> None:
        """
        Build the node's clients.
        """
        if self._is_built:
            raise RuntimeError("The trading node's clients are already built.")

        self._builder.build_data_clients(self._config.get("data_clients"))
        self._builder.build_exec_clients(self._config.get("exec_clients"))
        self._is_built = True

    def start(self) -> None:
        """
        Start the trading node.
        """
        if not self._is_built:
            raise RuntimeError(
                "The trading node's clients have not been built. "
                "Please run `node.build()` prior to start.")

        try:
            if self._loop.is_running():
                self._loop.create_task(self._run())
            else:
                self._loop.run_until_complete(self._run())
        except RuntimeError as ex:
            self._log.exception(ex)

    def stop(self) -> None:
        """
        Stop the trading node gracefully.

        After a specified delay the internal `Trader` residuals will be checked.
        If save strategy is specified then strategy states will then be saved.

        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._stop())
            else:
                self._loop.run_until_complete(self._stop())
        except RuntimeError as ex:
            self._log.exception(ex)

    def dispose(self) -> None:
        """
        Dispose of the trading node.

        Gracefully shuts down the executor and event loop.

        """
        try:
            timeout = self._clock.utc_now() + timedelta(seconds=5)
            while self._is_running:
                time.sleep(0.1)
                if self._clock.utc_now() >= timeout:
                    self._log.warning("Timed out (5s) waiting for node to stop.")
                    break

            self._log.info("state=DISPOSING...")

            self._log.debug(f"{self._data_engine.get_run_queue_task()}")
            self._log.debug(f"{self._exec_engine.get_run_queue_task()}")
            self._log.debug(f"{self._risk_engine.get_run_queue_task()}")

            self.trader.dispose()
            self._data_engine.dispose()
            self._exec_engine.dispose()
            self._risk_engine.dispose()

            self._log.info("Shutting down executor...")
            if sys.version_info >= (3, 9):
                # cancel_futures added in Python 3.9
                self._executor.shutdown(wait=True, cancel_futures=True)
            else:
                self._executor.shutdown(wait=True)

            self._log.info("Stopping event loop...")
            self._cancel_all_tasks()
            self._logger.stop()
            self._loop.stop()
        except RuntimeError as ex:
            self._log.exception(ex)
        finally:
            if self._loop.is_running():
                self._log.warning("Cannot close a running event loop.")
            else:
                self._log.info("Closing event loop...")
                self._loop.close()

            # Check and log if event loop is running
            if self._loop.is_running():
                self._log.warning(f"loop.is_running={self._loop.is_running()}")
            else:
                self._log.info(f"loop.is_running={self._loop.is_running()}")

            # Check and log if event loop is closed
            if not self._loop.is_closed():
                self._log.warning(f"loop.is_closed={self._loop.is_closed()}")
            else:
                self._log.info(f"loop.is_closed={self._loop.is_closed()}")

            self._log.info("state=DISPOSED.")

    def _log_header(self) -> None:
        nautilus_header(self._log)
        self._log.info(f"redis {redis.__version__}")
        self._log.info(f"msgpack {msgpack.version[0]}.{msgpack.version[1]}.{msgpack.version[2]}")
        if uvloop_version:
            self._log.info(f"uvloop {uvloop_version}")
        self._log.info("=================================================================")

    def _setup_loop(self) -> None:
        if self._loop.is_closed():
            self._log.error("Cannot setup signal handling (event loop was closed).")
            return
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signals = (signal.SIGTERM, signal.SIGINT, signal.SIGABRT)
        for sig in signals:
            self._loop.add_signal_handler(sig, self._loop_sig_handler, sig)
        self._log.debug(f"Event loop {signals} handling setup.")

    def _loop_sig_handler(self, sig: signal.signal) -> None:
        self._loop.remove_signal_handler(signal.SIGTERM)
        self._loop.add_signal_handler(signal.SIGINT, lambda: None)
        self._log.warning(f"Received {sig!s}, shutting down...")
        self.stop()

    async def _run(self) -> None:
        try:
            self._log.info("state=STARTING...")
            self._is_running = True

            self._logger.start()
            self._data_engine.start()
            self._exec_engine.start()
            self._risk_engine.start()

            result: bool = await self._await_engines_connected()
            if not result:
                return

            result: bool = await self._exec_engine.reconcile_state()
            if not result:
                return

            self.trader.start()

            if self._loop.is_running():
                self._log.info("state=RUNNING.")
            else:
                self._log.warning("Event loop is not running.")

            # Continue to run while engines are running...
            await self._data_engine.get_run_queue_task()
            await self._exec_engine.get_run_queue_task()
            await self._risk_engine.get_run_queue_task()
        except asyncio.CancelledError as ex:
            self._log.error(str(ex))

    async def _await_engines_connected(self) -> bool:
        self._log.info(f"Waiting for engines to initialize "
                       f"({self._connection_timeout}s timeout)...")

        # The data engine clients will be set as connected when all
        # instruments are received and updated with the data engine.
        # The execution engine clients will be set as connected when all
        # accounts are updated and the current order and position status is
        # reconciled. Thus any delay here will be due to blocking network IO.
        seconds = self._connection_timeout
        timeout: timedelta = self._clock.utc_now() + timedelta(seconds=seconds)
        while True:
            await asyncio.sleep(0)
            if self._clock.utc_now() >= timeout:
                self._log.error(f"Timed out ({seconds}s) waiting for engines to connect.")
                return False
            if not self._data_engine.check_connected():
                continue
            if not self._exec_engine.check_connected():
                continue
            break

        return True  # Engines connected

    async def _stop(self) -> None:
        self._is_stopping = True
        self._log.info("state=STOPPING...")

        if self.trader.state == ComponentState.RUNNING:
            self.trader.stop()
            self._log.info(f"Awaiting residual state ({self._check_residuals_delay}s delay)...")
            await asyncio.sleep(self._check_residuals_delay)
            self.trader.check_residuals()

        if self._save_strategy_state:
            self.trader.save()

        if self._data_engine.state == ComponentState.RUNNING:
            self._data_engine.stop()
        if self._exec_engine.state == ComponentState.RUNNING:
            self._exec_engine.stop()
        if self._risk_engine.state == ComponentState.RUNNING:
            self._risk_engine.stop()

        await self._await_engines_disconnected()

        # Clean up remaining timers
        timer_names = self._clock.timer_names()
        self._clock.cancel_timers()
        for name in timer_names:
            self._log.info(f"Cancelled Timer(name={name}).")

        self._log.info("state=STOPPED.")
        self._is_running = False

    async def _await_engines_disconnected(self) -> None:
        self._log.info(f"Waiting for engines to disconnect "
                       f"({self._disconnection_timeout}s timeout)...")

        seconds = self._disconnection_timeout
        timeout: timedelta = self._clock.utc_now() + timedelta(seconds=seconds)
        while True:
            await asyncio.sleep(0)
            if self._clock.utc_now() >= timeout:
                self._log.error(f"Timed out ({seconds}s) waiting for engines to disconnect.")
                break
            if not self._data_engine.check_disconnected():
                continue
            if not self._exec_engine.check_disconnected():
                continue
            break

    def _cancel_all_tasks(self) -> None:
        to_cancel = asyncio.tasks.all_tasks(self._loop)
        if not to_cancel:
            self._log.info("All tasks finished.")
            return

        for task in to_cancel:
            self._log.warning(f"Cancelling pending task {task}")
            task.cancel()

        if self._loop.is_running():
            self._log.warning("Event loop still running during `cancel_all_tasks`.")
            return

        finish_all_tasks: asyncio.Future = asyncio.tasks.gather(
            *to_cancel,
            loop=self._loop,
            return_exceptions=True,
        )
        self._loop.run_until_complete(finish_all_tasks)

        self._log.debug(f"{finish_all_tasks}")

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                self._loop.call_exception_handler({
                    "message": "unhandled exception during asyncio.run() shutdown",
                    "exception": task.exception(),
                    "task": task,
                })
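A sketch of the build-then-start flow this version introduces: client factories are registered by name, `build()` constructs the clients from the `data_clients` / `exec_clients` sections of the config, and only then may `start()` be called. `MyDataClientFactory`, `MyExecClientFactory`, and the `"my_venue"` key are placeholders for illustration, not names from the source.

# Hypothetical factory registration; replace with real factories for your venue.
node = TradingNode(strategies=strategies, config=config)
node.add_data_client_factory("my_venue", MyDataClientFactory)   # placeholder factory
node.add_exec_client_factory("my_venue", MyExecClientFactory)   # placeholder factory
node.build()   # builds clients from config["data_clients"] / config["exec_clients"]
node.start()   # raises RuntimeError if build() was not called first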
class TestTrader:

    def setup(self):
        # Fixture Setup
        self.clock = TestClock()
        self.logger = Logger(self.clock)

        self.trader_id = TestStubs.trader_id()
        self.account_id = TestStubs.account_id()

        self.msgbus = MessageBus(
            trader_id=self.trader_id,
            clock=self.clock,
            logger=self.logger,
        )

        self.cache = TestStubs.cache()

        self.portfolio = Portfolio(
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
        )

        self.data_engine = DataEngine(
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
        )
        self.data_engine.process(USDJPY_SIM)

        self.exec_engine = ExecutionEngine(
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
        )

        self.exchange = SimulatedExchange(
            venue=Venue("SIM"),
            venue_type=VenueType.ECN,
            oms_type=OMSType.HEDGING,
            account_type=AccountType.MARGIN,
            base_currency=USD,
            starting_balances=[Money(1_000_000, USD)],
            default_leverage=Decimal(50),
            leverages={},
            is_frozen_account=False,
            cache=self.cache,
            instruments=[USDJPY_SIM],
            modules=[],
            fill_model=FillModel(),
            clock=self.clock,
            logger=self.logger,
        )

        self.data_client = BacktestMarketDataClient(
            client_id=ClientId("SIM"),
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
        )

        self.exec_client = BacktestExecClient(
            exchange=self.exchange,
            account_id=self.account_id,
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
        )

        self.risk_engine = RiskEngine(
            portfolio=self.portfolio,
            msgbus=self.msgbus,
            cache=self.cache,
            clock=self.clock,
            logger=self.logger,
        )

        # Wire up components
        self.data_engine.register_client(self.data_client)
        self.exec_engine.register_client(self.exec_client)

        self.trader = Trader(
            trader_id=self.trader_id,
            msgbus=self.msgbus,
            cache=self.cache,
            portfolio=self.portfolio,
            data_engine=self.data_engine,
            risk_engine=self.risk_engine,
            exec_engine=self.exec_engine,
            clock=self.clock,
            logger=self.logger,
        )

    def test_initialize_trader(self):
        # Arrange, Act, Assert
        assert self.trader.id == TraderId("TESTER-000")
        assert self.trader.is_initialized
        assert len(self.trader.strategy_states()) == 0

    def test_add_strategy(self):
        # Arrange, Act
        self.trader.add_strategy(TradingStrategy())

        # Assert
        assert self.trader.strategy_states() == {StrategyId("TradingStrategy-000"): "INITIALIZED"}

    def test_add_strategies(self):
        # Arrange
        strategies = [
            TradingStrategy(TradingStrategyConfig(order_id_tag="001")),
            TradingStrategy(TradingStrategyConfig(order_id_tag="002")),
        ]

        # Act
        self.trader.add_strategies(strategies)

        # Assert
        assert self.trader.strategy_states() == {
            StrategyId("TradingStrategy-001"): "INITIALIZED",
            StrategyId("TradingStrategy-002"): "INITIALIZED",
        }

    def test_clear_strategies(self):
        # Arrange
        strategies = [
            TradingStrategy(TradingStrategyConfig(order_id_tag="001")),
            TradingStrategy(TradingStrategyConfig(order_id_tag="002")),
        ]
        self.trader.add_strategies(strategies)

        # Act
        self.trader.clear_strategies()

        # Assert
        assert self.trader.strategy_states() == {}

    def test_add_actor(self):
        # Arrange
        config = ActorConfig(component_id="MyPlugin-01")
        actor = Actor(config)

        # Act
        self.trader.add_actor(actor)

        # Assert
        assert self.trader.actor_ids() == [ComponentId("MyPlugin-01")]

    def test_add_actors(self):
        # Arrange
        actors = [
            Actor(ActorConfig(component_id="MyPlugin-01")),
            Actor(ActorConfig(component_id="MyPlugin-02")),
        ]

        # Act
        self.trader.add_actors(actors)

        # Assert
        assert self.trader.actor_ids() == [
            ComponentId("MyPlugin-01"),
            ComponentId("MyPlugin-02"),
        ]

    def test_clear_actors(self):
        # Arrange
        actors = [
            Actor(ActorConfig(component_id="MyPlugin-01")),
            Actor(ActorConfig(component_id="MyPlugin-02")),
        ]
        self.trader.add_actors(actors)

        # Act
        self.trader.clear_actors()

        # Assert
        assert self.trader.actor_ids() == []

    def test_get_strategy_states(self):
        # Arrange
        strategies = [
            TradingStrategy(TradingStrategyConfig(order_id_tag="001")),
            TradingStrategy(TradingStrategyConfig(order_id_tag="002")),
        ]
        self.trader.add_strategies(strategies)

        # Act
        status = self.trader.strategy_states()

        # Assert
        assert StrategyId("TradingStrategy-001") in status
        assert StrategyId("TradingStrategy-002") in status
        assert status[StrategyId("TradingStrategy-001")] == "INITIALIZED"
        assert status[StrategyId("TradingStrategy-002")] == "INITIALIZED"
        assert len(status) == 2

    def test_change_strategies(self):
        # Arrange
        strategies = [
            TradingStrategy(TradingStrategyConfig(order_id_tag="003")),
            TradingStrategy(TradingStrategyConfig(order_id_tag="004")),
        ]

        # Act
        self.trader.add_strategies(strategies)

        # Assert
        assert strategies[0].id in self.trader.strategy_states()
        assert strategies[1].id in self.trader.strategy_states()
        assert len(self.trader.strategy_states()) == 2

    def test_start_a_trader(self):
        # Arrange
        strategies = [
            TradingStrategy(TradingStrategyConfig(order_id_tag="001")),
            TradingStrategy(TradingStrategyConfig(order_id_tag="002")),
        ]
        self.trader.add_strategies(strategies)

        # Act
        self.trader.start()

        strategy_states = self.trader.strategy_states()

        # Assert
        assert self.trader.is_running
        assert strategy_states[StrategyId("TradingStrategy-001")] == "RUNNING"
        assert strategy_states[StrategyId("TradingStrategy-002")] == "RUNNING"

    def test_stop_a_running_trader(self):
        # Arrange
        strategies = [
            TradingStrategy(TradingStrategyConfig(order_id_tag="001")),
            TradingStrategy(TradingStrategyConfig(order_id_tag="002")),
        ]
        self.trader.add_strategies(strategies)
        self.trader.start()

        # Act
        self.trader.stop()

        strategy_states = self.trader.strategy_states()

        # Assert
        assert self.trader.is_stopped
        assert strategy_states[StrategyId("TradingStrategy-001")] == "STOPPED"
        assert strategy_states[StrategyId("TradingStrategy-002")] == "STOPPED"

    def test_subscribe_to_msgbus_topic_adds_subscription(self):
        # Arrange
        consumer = []

        # Act
        self.trader.subscribe("events*", consumer.append)

        # Assert
        assert len(self.msgbus.subscriptions("events*")) == 6
        assert "events*" in self.msgbus.topics()
        assert self.msgbus.subscriptions("events*")[-1].handler == consumer.append

    def test_unsubscribe_from_msgbus_topic_removes_subscription(self):
        # Arrange
        consumer = []
        self.trader.subscribe("events*", consumer.append)

        # Act
        self.trader.unsubscribe("events*", consumer.append)

        # Assert
        assert len(self.msgbus.subscriptions("events*")) == 5
class TraderTests(unittest.TestCase):

    def setUp(self):
        # Fixture Setup
        usdjpy = InstrumentLoader.default_fx_ccy(TestStubs.symbol_usdjpy_fxcm())
        data = BacktestDataContainer()
        data.add_instrument(usdjpy)
        data.add_bars(usdjpy.symbol, BarAggregation.MINUTE, PriceType.BID, TestDataProvider.usdjpy_1min_bid()[:2000])
        data.add_bars(usdjpy.symbol, BarAggregation.MINUTE, PriceType.ASK, TestDataProvider.usdjpy_1min_ask()[:2000])

        clock = TestClock()
        uuid_factory = TestUUIDFactory()
        logger = TestLogger(clock)
        trader_id = TraderId("TESTER", "000")
        account_id = TestStubs.account_id()

        self.portfolio = Portfolio(
            clock=clock,
            uuid_factory=uuid_factory,
            logger=logger,
        )

        data_engine = BacktestDataEngine(
            data=data,
            tick_capacity=1000,
            bar_capacity=1000,
            portfolio=self.portfolio,
            clock=clock,
            logger=logger,
        )

        self.analyzer = PerformanceAnalyzer()

        self.exec_db = BypassExecutionDatabase(trader_id=trader_id, logger=logger)
        self.exec_engine = ExecutionEngine(
            database=self.exec_db,
            portfolio=self.portfolio,
            clock=clock,
            uuid_factory=uuid_factory,
            logger=logger,
        )

        self.market = SimulatedMarket(
            venue=Venue("FXCM"),
            oms_type=OMSType.HEDGING,
            generate_position_ids=True,
            exec_cache=self.exec_engine.cache,
            instruments={usdjpy.symbol: usdjpy},
            config=BacktestConfig(),
            fill_model=FillModel(),
            commission_model=GenericCommissionModel(),
            clock=clock,
            uuid_factory=TestUUIDFactory(),
            logger=logger,
        )

        self.exec_client = BacktestExecClient(
            market=self.market,
            account_id=account_id,
            engine=self.exec_engine,
            logger=logger,
        )
        self.exec_engine.register_client(self.exec_client)

        strategies = [
            EmptyStrategy("001"),
            EmptyStrategy("002"),
        ]

        self.trader = Trader(
            trader_id=trader_id,
            strategies=strategies,
            data_engine=data_engine,
            exec_engine=self.exec_engine,
            clock=clock,
            uuid_factory=uuid_factory,
            logger=logger,
        )

    def test_initialize_trader(self):
        # Arrange
        # Act
        trader_id = self.trader.id

        # Assert
        self.assertEqual(TraderId("TESTER", "000"), trader_id)
        self.assertEqual(IdTag("000"), trader_id.tag)
        self.assertEqual(ComponentState.INITIALIZED, self.trader.state())
        self.assertEqual(2, len(self.trader.strategy_states()))

    def test_get_strategy_states(self):
        # Arrange
        # Act
        status = self.trader.strategy_states()

        # Assert
        self.assertTrue(StrategyId("EmptyStrategy", "001") in status)
        self.assertTrue(StrategyId("EmptyStrategy", "002") in status)
        self.assertEqual('INITIALIZED', status[StrategyId("EmptyStrategy", "001")])
        self.assertEqual('INITIALIZED', status[StrategyId("EmptyStrategy", "002")])
        self.assertEqual(2, len(status))

    def test_change_strategies(self):
        # Arrange
        strategies = [EmptyStrategy("003"), EmptyStrategy("004")]

        # Act
        self.trader.initialize_strategies(strategies)

        # Assert
        self.assertTrue(strategies[0].id in self.trader.strategy_states())
        self.assertTrue(strategies[1].id in self.trader.strategy_states())
        self.assertEqual(2, len(self.trader.strategy_states()))

    def test_trader_detects_none_unique_identifiers(self):
        # Arrange
        strategies = [EmptyStrategy("000"), EmptyStrategy("000")]

        # Act
        self.assertRaises(ValueError, self.trader.initialize_strategies, strategies)

    def test_start_a_trader(self):
        # Arrange
        # Act
        self.trader.start()

        strategy_states = self.trader.strategy_states()

        # Assert
        self.assertEqual(ComponentState.RUNNING, self.trader.state())
        self.assertEqual('RUNNING', strategy_states[StrategyId("EmptyStrategy", "001")])
        self.assertEqual('RUNNING', strategy_states[StrategyId("EmptyStrategy", "002")])

    def test_stop_a_running_trader(self):
        # Arrange
        self.trader.start()

        # Act
        self.trader.stop()

        strategy_states = self.trader.strategy_states()

        # Assert
        self.assertEqual(ComponentState.STOPPED, self.trader.state())
        self.assertEqual('STOPPED', strategy_states[StrategyId("EmptyStrategy", "001")])
        self.assertEqual('STOPPED', strategy_states[StrategyId("EmptyStrategy", "002")])
class TradingNode:
    """
    Provides an asynchronous network node for live trading.

    Parameters
    ----------
    config : TradingNodeConfig, optional
        The configuration for the instance.

    Raises
    ------
    TypeError
        If `config` is not of type `TradingNodeConfig`.

    """

    def __init__(self, config: Optional[TradingNodeConfig] = None):
        if config is None:
            config = TradingNodeConfig()
        PyCondition.not_none(config, "config")
        PyCondition.type(config, TradingNodeConfig, "config")

        # Configuration
        self._config = config

        # Setup loop
        self._loop = asyncio.get_event_loop()
        self._executor = concurrent.futures.ThreadPoolExecutor()
        self._loop.set_default_executor(self._executor)
        self._loop.set_debug(config.loop_debug)

        # Components
        self._clock = LiveClock(loop=self._loop)
        self._uuid_factory = UUIDFactory()
        self.created_time = self._clock.utc_now()
        self._is_running = False

        # Identifiers
        self.trader_id = TraderId(config.trader_id)
        self.machine_id = socket.gethostname()
        self.instance_id = self._uuid_factory.generate()

        # Setup logging
        self._logger = LiveLogger(
            loop=self._loop,
            clock=self._clock,
            trader_id=self.trader_id,
            machine_id=self.machine_id,
            instance_id=self.instance_id,
            level_stdout=LogLevelParser.from_str_py(config.log_level.upper()),
        )

        self._log = LoggerAdapter(
            component_name=type(self).__name__,
            logger=self._logger,
        )

        self._log_header()
        self._log.info("Building...")

        if platform.system() != "Windows":
            # Windows does not support signal handling
            # https://stackoverflow.com/questions/45987985/asyncio-loops-add-signal-handler-in-windows
            self._setup_loop()

        ########################################################################
        # Build platform
        ########################################################################
        if config.cache_database is None or config.cache_database.type == "in-memory":
            cache_db = None
        elif config.cache_database.type == "redis":
            cache_db = RedisCacheDatabase(
                trader_id=self.trader_id,
                logger=self._logger,
                serializer=MsgPackSerializer(timestamps_as_str=True),
                config=config.cache_database,
            )
        else:  # pragma: no cover (design-time error)
            raise ValueError(
                "The cache_db_type in the configuration is unrecognized, "
                "must be one of {'in-memory', 'redis'}.",
            )

        self._msgbus = MessageBus(
            trader_id=self.trader_id,
            clock=self._clock,
            logger=self._logger,
        )

        self._cache = Cache(
            database=cache_db,
            logger=self._logger,
            config=config.cache,
        )

        self.portfolio = Portfolio(
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
        )

        self._data_engine = LiveDataEngine(
            loop=self._loop,
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
            config=config.data_engine,
        )

        self._exec_engine = LiveExecutionEngine(
            loop=self._loop,
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
            config=config.exec_engine,
        )
        self._exec_engine.load_cache()

        self._risk_engine = LiveRiskEngine(
            loop=self._loop,
            portfolio=self.portfolio,
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
            config=config.risk_engine,
        )

        self.trader = Trader(
            trader_id=self.trader_id,
            msgbus=self._msgbus,
            cache=self._cache,
            portfolio=self.portfolio,
            data_engine=self._data_engine,
            risk_engine=self._risk_engine,
            exec_engine=self._exec_engine,
            clock=self._clock,
            logger=self._logger,
        )

        if config.load_strategy_state:
            self.trader.load()

        # Setup persistence (requires trader)
        self.persistence_writers: List[Any] = []
        if config.persistence:
            self._setup_persistence(config=config.persistence)

        self._builder = TradingNodeBuilder(
            loop=self._loop,
            data_engine=self._data_engine,
            exec_engine=self._exec_engine,
            msgbus=self._msgbus,
            cache=self._cache,
            clock=self._clock,
            logger=self._logger,
            log=self._log,
        )

        self._log.info("INITIALIZED.")
        self.time_to_initialize = self._clock.delta(self.created_time)
        self._log.info(f"Initialized in {int(self.time_to_initialize.total_seconds() * 1000)}ms.")

        self._is_built = False

    @property
    def is_running(self) -> bool:
        """
        If the trading node is running.

        Returns
        -------
        bool

        """
        return self._is_running

    @property
    def is_built(self) -> bool:
        """
        If the trading node clients are built.

        Returns
        -------
        bool

        """
        return self._is_built

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        """
        Return the event loop of the trading node.

        Returns
        -------
        asyncio.AbstractEventLoop

        """
        return self._loop

    def get_logger(self) -> LiveLogger:
        """
        Return the logger for the trading node.

        Returns
        -------
        LiveLogger

        """
        return self._logger

    def add_log_sink(self, handler: Callable[[Dict], None]):
        """
        Register the given sink handler with the node's logger.

        Parameters
        ----------
        handler : Callable[[Dict], None]
            The sink handler to register.

        Raises
        ------
        KeyError
            If `handler` already registered.

        """
        self._logger.register_sink(handler=handler)

    def add_data_client_factory(self, name: str, factory):
        """
        Add the given data client factory to the node.

        Parameters
        ----------
        name : str
            The name of the client factory.
        factory : LiveDataClientFactory or LiveExecutionClientFactory
            The factory to add.

        Raises
        ------
        ValueError
            If `name` is not a valid string.
        KeyError
            If `name` has already been added.

        """
        self._builder.add_data_client_factory(name, factory)

    def add_exec_client_factory(self, name: str, factory):
        """
        Add the given execution client factory to the node.

        Parameters
        ----------
        name : str
            The name of the client factory.
        factory : LiveDataClientFactory or LiveExecutionClientFactory
            The factory to add.

        Raises
        ------
        ValueError
            If `name` is not a valid string.
        KeyError
            If `name` has already been added.

        """
        self._builder.add_exec_client_factory(name, factory)

    def build(self) -> None:
        """
        Build the node's clients.
        """
        if self._is_built:
            raise RuntimeError("The trading node's clients are already built.")

        self._builder.build_data_clients(self._config.data_clients)
        self._builder.build_exec_clients(self._config.exec_clients)
        self._is_built = True

    def start(self) -> Optional[asyncio.Task]:
        """
        Start the trading node.
        """
        if not self._is_built:
            raise RuntimeError(
                "The trading node's clients have not been built. "
                "Please run `node.build()` prior to start."
            )

        try:
            if self._loop.is_running():
                return self._loop.create_task(self._run())
            else:
                self._loop.run_until_complete(self._run())

            return None
        except RuntimeError as ex:
            self._log.exception("Error on run", ex)
            return None

    def stop(self) -> None:
        """
        Stop the trading node gracefully.

        After a specified delay the internal `Trader` residuals will be checked.
        If save strategy is specified then strategy states will then be saved.

        """
        try:
            if self._loop.is_running():
                self._loop.create_task(self._stop())
            else:
                self._loop.run_until_complete(self._stop())
        except RuntimeError as ex:
            self._log.exception("Error on stop", ex)

    def dispose(self) -> None:
        """
        Dispose of the trading node.

        Gracefully shuts down the executor and event loop.

        """
        try:
            timeout = self._clock.utc_now() + timedelta(seconds=self._config.timeout_disconnection)
            while self._is_running:
                time.sleep(0.1)
                if self._clock.utc_now() >= timeout:
                    self._log.warning(
                        f"Timed out ({self._config.timeout_disconnection}s) waiting for node to stop."
                        f"\nStatus"
                        f"\n------"
                        f"\nDataEngine.check_disconnected() == {self._data_engine.check_disconnected()}"
                        f"\nExecEngine.check_disconnected() == {self._exec_engine.check_disconnected()}"
                    )
                    break

            self._log.info("DISPOSING...")

            self._log.debug(f"{self._data_engine.get_run_queue_task()}")
            self._log.debug(f"{self._exec_engine.get_run_queue_task()}")
            self._log.debug(f"{self._risk_engine.get_run_queue_task()}")

            self.trader.dispose()
            self._data_engine.dispose()
            self._exec_engine.dispose()
            self._risk_engine.dispose()

            self._log.info("Shutting down executor...")
            if sys.version_info >= (3, 9):
                # cancel_futures added in Python 3.9
                self._executor.shutdown(wait=True, cancel_futures=True)
            else:
                self._executor.shutdown(wait=True)

            self._log.info("Stopping event loop...")
            self._cancel_all_tasks()
            self._loop.stop()
        except RuntimeError as ex:
            self._log.exception("Error on dispose", ex)
        finally:
            if self._loop.is_running():
                self._log.warning("Cannot close a running event loop.")
            else:
                self._log.info("Closing event loop...")
                self._loop.close()

            # Check and log if event loop is running
            if self._loop.is_running():
                self._log.warning(f"loop.is_running={self._loop.is_running()}")
            else:
                self._log.info(f"loop.is_running={self._loop.is_running()}")

            # Check and log if event loop is closed
            if not self._loop.is_closed():
                self._log.warning(f"loop.is_closed={self._loop.is_closed()}")
            else:
                self._log.info(f"loop.is_closed={self._loop.is_closed()}")

            self._log.info("DISPOSED.")

    def _log_header(self) -> None:
        nautilus_header(self._log)
        self._log.info(f"redis {redis.__version__}")  # type: ignore
        self._log.info(f"msgpack {msgpack.version[0]}.{msgpack.version[1]}.{msgpack.version[2]}")
        if uvloop_version:
            self._log.info(f"uvloop {uvloop_version}")
        self._log.info("\033[36m=================================================================")

    def _setup_loop(self) -> None:
        if self._loop.is_closed():
            self._log.error("Cannot setup signal handling (event loop was closed).")
            return
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signals = (signal.SIGTERM, signal.SIGINT, signal.SIGABRT)
        for sig in signals:
            self._loop.add_signal_handler(sig, self._loop_sig_handler, sig)
        self._log.debug(f"Event loop signal handling setup for {signals}.")

    def _setup_persistence(self, config: PersistenceConfig) -> None:
        # Setup persistence
        path = f"{config.catalog_path}/live/{self.instance_id}.feather"
        writer = FeatherWriter(
            path=path,
            fs_protocol=config.fs_protocol,
            flush_interval=config.flush_interval,
        )
        self.persistence_writers.append(writer)
        self.trader.subscribe("*", writer.write)
        self._log.info(f"Persisting data & events to {path=}")

        # Setup logging
        if config.persist_logs:

            def sink(record, f):
                f.write(orjson.dumps(record) + b"\n")

            path = f"{config.catalog_path}/logs/{self.instance_id}.log"
            log_sink = open(path, "wb")
            self.persistence_writers.append(log_sink)
            self._logger.register_sink(partial(sink, f=log_sink))
            self._log.info(f"Persisting logs to {path=}")

    def _loop_sig_handler(self, sig) -> None:
        self._loop.remove_signal_handler(signal.SIGTERM)
        self._loop.add_signal_handler(signal.SIGINT, lambda: None)
        self._log.warning(f"Received {sig!s}, shutting down...")
        self.stop()

    async def _run(self) -> None:
        try:
            self._log.info("STARTING...")
            self._is_running = True

            # Start system
            self._logger.start()
            self._data_engine.start()
            self._exec_engine.start()
            self._risk_engine.start()

            # Connect all clients
            self._data_engine.connect()
            self._exec_engine.connect()

            # Await engine connection and initialization
            self._log.info(
                f"Waiting for engines to connect and initialize "
f"({self._config.timeout_connection}s timeout)...", color=LogColor.BLUE, ) if not await self._await_engines_connected(): self._log.warning( f"Timed out ({self._config.timeout_connection}s) waiting for engines to connect and initialize." f"\nStatus" f"\n------" f"\nDataEngine.check_connected() == {self._data_engine.check_connected()}" f"\nExecEngine.check_connected() == {self._exec_engine.check_connected()}" ) return self._log.info("Engines connected.", color=LogColor.GREEN) # Await execution state reconciliation self._log.info( f"Waiting for execution state to reconcile " f"({self._config.timeout_reconciliation}s timeout)...", color=LogColor.BLUE, ) if not await self._exec_engine.reconcile_state( timeout_secs=self._config.timeout_reconciliation, ): self._log.error("Execution state could not be reconciled.") return self._log.info("State reconciled.", color=LogColor.GREEN) # Initialize portfolio self.portfolio.initialize_orders() self.portfolio.initialize_positions() # Await portfolio initialization self._log.info( "Waiting for portfolio to initialize " f"({self._config.timeout_portfolio}s timeout)...", color=LogColor.BLUE, ) if not await self._await_portfolio_initialized(): self._log.warning( f"Timed out ({self._config.timeout_portfolio}s) waiting for portfolio to initialize." f"\nStatus" f"\n------" f"\nPortfolio.initialized == {self.portfolio.initialized}" ) return self._log.info("Portfolio initialized.", color=LogColor.GREEN) # Start trader and strategies self.trader.start() if self._loop.is_running(): self._log.info("RUNNING.") else: self._log.warning("Event loop is not running.") # Continue to run while engines are running... await self._data_engine.get_run_queue_task() await self._exec_engine.get_run_queue_task() await self._risk_engine.get_run_queue_task() except asyncio.CancelledError as ex: self._log.error(str(ex)) async def _await_engines_connected(self) -> bool: # - The data engine clients will be set connected when all # instruments are received and updated with the data engine. # - The execution engine clients will be set connected when all # accounts are updated and the current order and position status is # reconciled. # Thus any delay here will be due to blocking network I/O. seconds = self._config.timeout_connection timeout: timedelta = self._clock.utc_now() + timedelta(seconds=seconds) while True: await asyncio.sleep(0) if self._clock.utc_now() >= timeout: return False if not self._data_engine.check_connected(): continue if not self._exec_engine.check_connected(): continue break return True # Engines connected async def _await_portfolio_initialized(self) -> bool: # - The portfolio will be set initialized when all margin and unrealized # PnL calculations are completed (maybe waiting on first quotes). # Thus any delay here will be due to blocking network I/O. 
seconds = self._config.timeout_portfolio timeout: timedelta = self._clock.utc_now() + timedelta(seconds=seconds) while True: await asyncio.sleep(0) if self._clock.utc_now() >= timeout: return False if not self.portfolio.initialized: continue break return True # Portfolio initialized async def _stop(self) -> None: self._is_stopping = True self._log.info("STOPPING...") if self.trader.is_running: self.trader.stop() self._log.info( f"Awaiting residual state ({self._config.check_residuals_delay}s delay)...", color=LogColor.BLUE, ) await asyncio.sleep(self._config.check_residuals_delay) self.trader.check_residuals() if self._config.save_strategy_state: self.trader.save() # Disconnect all clients self._data_engine.disconnect() self._exec_engine.disconnect() if self._data_engine.is_running: self._data_engine.stop() if self._exec_engine.is_running: self._exec_engine.stop() if self._risk_engine.is_running: self._risk_engine.stop() self._log.info( f"Waiting for engines to disconnect " f"({self._config.timeout_disconnection}s timeout)...", color=LogColor.BLUE, ) if not await self._await_engines_disconnected(): self._log.error( f"Timed out ({self._config.timeout_disconnection}s) waiting for engines to disconnect." f"\nStatus" f"\n------" f"\nDataEngine.check_disconnected() == {self._data_engine.check_disconnected()}" f"\nExecEngine.check_disconnected() == {self._exec_engine.check_disconnected()}" ) # Clean up remaining timers timer_names = self._clock.timer_names() self._clock.cancel_timers() for name in timer_names: self._log.info(f"Cancelled Timer(name={name}).") # Clean up persistence for writer in self.persistence_writers: writer.close() self._log.info("STOPPED.") self._logger.stop() self._is_running = False async def _await_engines_disconnected(self) -> bool: seconds = self._config.timeout_disconnection timeout: timedelta = self._clock.utc_now() + timedelta(seconds=seconds) while True: await asyncio.sleep(0) if self._clock.utc_now() >= timeout: return False if not self._data_engine.check_disconnected(): continue if not self._exec_engine.check_disconnected(): continue break return True # Engines disconnected def _cancel_all_tasks(self) -> None: to_cancel = asyncio.tasks.all_tasks(self._loop) if not to_cancel: self._log.info("All tasks canceled.") return for task in to_cancel: self._log.warning(f"Canceling pending task {task}") task.cancel() if self._loop.is_running(): self._log.warning("Event loop still running during `cancel_all_tasks`.") return finish_all_tasks: asyncio.Future = asyncio.tasks.gather( # type: ignore *to_cancel, loop=self._loop, return_exceptions=True, ) self._loop.run_until_complete(finish_all_tasks) self._log.debug(f"{finish_all_tasks}") for task in to_cancel: # pragma: no cover if task.cancelled(): continue if task.exception() is not None: self._loop.call_exception_handler( { "message": "unhandled exception during asyncio.run() shutdown", "exception": task.exception(), "task": task, } )
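The lifecycle methods above (build(), start(), stop(), dispose()) are intended to be driven from a small entry-point script. The following is a minimal usage sketch, not a definitive runner: the import paths, the "BINANCE" factory name, and the MyLiveDataClientFactory / MyLiveExecClientFactory classes are placeholders standing in for a real adapter's factories, and it assumes TradingNodeConfig accepts trader_id as a constructor argument.

# Minimal usage sketch for the TradingNode lifecycle defined above.
# Import paths and the BINANCE factories are assumptions; substitute a
# real adapter's factory classes and registration names.
from nautilus_trader.config import TradingNodeConfig   # path assumed
from nautilus_trader.live.node import TradingNode      # path assumed

from my_adapters import MyLiveDataClientFactory, MyLiveExecClientFactory  # hypothetical

config = TradingNodeConfig(trader_id="TRADER-001")
node = TradingNode(config=config)

# Client factories must be registered before build(), which wires the clients.
node.add_data_client_factory("BINANCE", MyLiveDataClientFactory)
node.add_exec_client_factory("BINANCE", MyLiveExecClientFactory)
node.build()  # raises RuntimeError if called a second time

try:
    node.start()  # blocks via loop.run_until_complete() when no loop is running
finally:
    node.stop()
    node.dispose()

Note that start() raises RuntimeError when build() has not been called first, mirroring the guard in the method above. An earlier, dict-configured version of the same node follows for comparison.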
class TradingNode: """ Provides an asynchronous network node for live trading. """ def __init__( self, strategies: List[TradingStrategy], config: Dict[str, object], ): """ Initialize a new instance of the TradingNode class. Parameters ---------- strategies : list[TradingStrategy] The list of strategies to run on the trading node. config : dict[str, object] The configuration for the trading node. Raises ------ ValueError If strategies is None or empty. ValueError If config is None or empty. """ PyCondition.not_none(strategies, "strategies") PyCondition.not_none(config, "config") PyCondition.not_empty(strategies, "strategies") PyCondition.not_empty(config, "config") # Extract configs config_trader = config.get("trader", {}) config_log = config.get("logging", {}) config_exec_db = config.get("exec_database", {}) config_strategy = config.get("strategy", {}) config_adapters = config.get("adapters", {}) self._uuid_factory = UUIDFactory() self._loop = asyncio.get_event_loop() self._executor = concurrent.futures.ThreadPoolExecutor() self._loop.set_default_executor(self._executor) self._clock = LiveClock(loop=self._loop) self.created_time = self._clock.utc_now() self._is_running = False # Uncomment for debugging # self._loop.set_debug(True) # Setup identifiers self.trader_id = TraderId( name=config_trader["name"], tag=config_trader["id_tag"], ) # Setup logging self._logger = LiveLogger( clock=self._clock, name=self.trader_id.value, level_console=LogLevelParser.from_str_py(config_log.get("log_level_console")), level_file=LogLevelParser.from_str_py(config_log.get("log_level_file")), level_store=LogLevelParser.from_str_py(config_log.get("log_level_store")), run_in_process=config_log.get("run_in_process", True), # Run logger in a separate process log_thread=config_log.get("log_thread_id", False), log_to_file=config_log.get("log_to_file", False), log_file_path=config_log.get("log_file_path", ""), ) self._log = LoggerAdapter(component_name=self.__class__.__name__, logger=self._logger) self._log_header() self._log.info("Building...") self._setup_loop() # Requires the logger to be initialized self.portfolio = Portfolio( clock=self._clock, logger=self._logger, ) self._data_engine = LiveDataEngine( loop=self._loop, portfolio=self.portfolio, clock=self._clock, logger=self._logger, config={"qsize": 10000}, ) self.portfolio.register_cache(self._data_engine.cache) self.analyzer = PerformanceAnalyzer() if config_exec_db["type"] == "redis": exec_db = RedisExecutionDatabase( trader_id=self.trader_id, logger=self._logger, command_serializer=MsgPackCommandSerializer(), event_serializer=MsgPackEventSerializer(), config={ "host": config_exec_db["host"], "port": config_exec_db["port"], } ) else: exec_db = BypassExecutionDatabase( trader_id=self.trader_id, logger=self._logger, ) self._exec_engine = LiveExecutionEngine( loop=self._loop, database=exec_db, portfolio=self.portfolio, clock=self._clock, logger=self._logger, config={"qsize": 10000}, ) self._exec_engine.load_cache() self._setup_adapters(config_adapters, self._logger) self.trader = Trader( trader_id=self.trader_id, strategies=strategies, portfolio=self.portfolio, data_engine=self._data_engine, exec_engine=self._exec_engine, clock=self._clock, logger=self._logger, ) self._check_residuals_delay = config_trader.get("check_residuals_delay", 5.0) self._load_strategy_state = config_strategy.get("load_state", True) self._save_strategy_state = config_strategy.get("save_state", True) if self._load_strategy_state: self.trader.load() self._log.info("state=INITIALIZED.") 
self.time_to_initialize = self._clock.delta(self.created_time) self._log.info(f"Initialized in {self.time_to_initialize.total_seconds():.3f}s.") @property def is_running(self) -> bool: """ If the trading node is running. Returns ------- bool True if running, else False. """ return self._is_running def get_event_loop(self) -> asyncio.AbstractEventLoop: """ Return the event loop of the trading node. Returns ------- asyncio.AbstractEventLoop """ return self._loop def get_logger(self) -> LiveLogger: """ Return the logger for the trading node. Returns ------- LiveLogger """ return self._logger def start(self) -> None: """ Start the trading node. """ try: if self._loop.is_running(): self._loop.create_task(self._run()) else: self._loop.run_until_complete(self._run()) except RuntimeError as ex: self._log.exception(ex) def stop(self) -> None: """ Stop the trading node gracefully. After a specified delay the internal `Trader` residuals will be checked. If save strategy is specified then strategy states will then be saved. """ try: if self._loop.is_running(): self._loop.create_task(self._stop()) else: self._loop.run_until_complete(self._stop()) except RuntimeError as ex: self._log.exception(ex) def dispose(self) -> None: """ Dispose of the trading node. Gracefully shuts down the executor and event loop. """ try: timeout = self._clock.utc_now() + timedelta(seconds=5) while self._is_running: time.sleep(0.1) if self._clock.utc_now() >= timeout: self._log.warning("Timed out (5s) waiting for node to stop.") break self._log.info("state=DISPOSING...") self._log.debug(f"{self._data_engine.get_run_queue_task()}") self._log.debug(f"{self._exec_engine.get_run_queue_task()}") self.trader.dispose() self._data_engine.dispose() self._exec_engine.dispose() self._log.info("Shutting down executor...") if sys.version_info >= (3, 9): # cancel_futures added in Python 3.9 self._executor.shutdown(wait=True, cancel_futures=True) else: self._executor.shutdown(wait=True) self._log.info("Stopping event loop...") self._loop.stop() self._cancel_all_tasks() except RuntimeError as ex: self._log.error("Shutdown coro issues will be fixed soon...") # TODO: Remove when fixed self._log.exception(ex) finally: if self._loop.is_running(): self._log.warning("Cannot close a running event loop.") else: self._log.info("Closing event loop...") self._loop.close() # Check and log if event loop is running if self._loop.is_running(): self._log.warning(f"loop.is_running={self._loop.is_running()}") else: self._log.info(f"loop.is_running={self._loop.is_running()}") # Check and log if event loop is closed if not self._loop.is_closed(): self._log.warning(f"loop.is_closed={self._loop.is_closed()}") else: self._log.info(f"loop.is_closed={self._loop.is_closed()}") self._log.info("state=DISPOSED.") self._logger.stop() # Ensure process is stopped time.sleep(0.1) # Ensure final log messages def _log_header(self) -> None: nautilus_header(self._log) self._log.info(f"redis {redis.__version__}") self._log.info(f"msgpack {msgpack.version[0]}.{msgpack.version[1]}.{msgpack.version[2]}") if uvloop_version: self._log.info(f"uvloop {uvloop_version}") self._log.info("=================================================================") def _setup_loop(self) -> None: if self._loop.is_closed(): self._log.error("Cannot setup signal handling (event loop was closed).") return signal.signal(signal.SIGINT, signal.SIG_DFL) signals = (signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGABRT) for sig in signals: self._loop.add_signal_handler(sig, self._loop_sig_handler, sig) 
self._log.debug(f"Event loop {signals} handling setup.") def _loop_sig_handler(self, sig: signal.signal) -> None: self._loop.remove_signal_handler(signal.SIGTERM) self._loop.add_signal_handler(signal.SIGINT, lambda: None) self._log.warning(f"Received {sig!s}, shutting down...") self.stop() def _setup_adapters(self, config: Dict[str, object], logger: LiveLogger) -> None: # Setup each data client for name, config in config.items(): if name.startswith("ccxt-"): try: import ccxtpro # TODO: Find a better way of doing this except ImportError: raise ImportError("ccxtpro is not installed, " "installation instructions can be found at https://ccxt.pro") client_cls = getattr(ccxtpro, name.partition('-')[2].lower()) data_client, exec_client = CCXTClientsFactory.create( client_cls=client_cls, config=config, data_engine=self._data_engine, exec_engine=self._exec_engine, clock=self._clock, logger=logger, ) elif name == "oanda": data_client = OandaDataClientFactory.create( config=config, data_engine=self._data_engine, clock=self._clock, logger=logger, ) exec_client = None # TODO: Implement else: self._log.error(f"No adapter available for `{name}`.") continue if data_client is not None: self._data_engine.register_client(data_client) if exec_client is not None: self._exec_engine.register_client(exec_client) async def _run(self) -> None: try: self._log.info("state=STARTING...") self._is_running = True self._data_engine.start() self._exec_engine.start() result: bool = await self._await_engines_connected() if not result: return result: bool = await self._exec_engine.resolve_state() if not result: return self.trader.start() if self._loop.is_running(): self._log.info("state=RUNNING.") else: self._log.warning("Event loop is not running.") # Continue to run while engines are running... await self._data_engine.get_run_queue_task() await self._exec_engine.get_run_queue_task() except asyncio.CancelledError as ex: self._log.error(str(ex)) async def _await_engines_connected(self) -> bool: self._log.info("Waiting for engines to initialize...") # The data engine clients will be set as connected when all # instruments are received and updated with the data engine. # The execution engine clients will be set as connected when all # accounts are updated and the current order and position status is # confirmed. Thus any delay here will be due to blocking network IO. 
seconds = 5 # Hard coded for now timeout: timedelta = self._clock.utc_now() + timedelta(seconds=seconds) while True: await asyncio.sleep(0.1) if self._clock.utc_now() >= timeout: self._log.error(f"Timed out ({seconds}s) waiting for " f"engines to initialize.") return False if not self._data_engine.check_connected(): continue if not self._exec_engine.check_connected(): continue break return True # Engines initialized async def _stop(self) -> None: self._is_stopping = True self._log.info("state=STOPPING...") if self.trader.state == ComponentState.RUNNING: self.trader.stop() self._log.info(f"Awaiting residual state ({self._check_residuals_delay}s delay)...") await asyncio.sleep(self._check_residuals_delay) self.trader.check_residuals() if self._save_strategy_state: self.trader.save() if self._data_engine.state == ComponentState.RUNNING: self._data_engine.stop() if self._exec_engine.state == ComponentState.RUNNING: self._exec_engine.stop() await self._await_engines_disconnected() # Clean up remaining timers timer_names = self._clock.timer_names() self._clock.cancel_timers() for name in timer_names: self._log.info(f"Cancelled Timer(name={name}).") self._log.info("state=STOPPED.") self._is_running = False async def _await_engines_disconnected(self) -> None: self._log.info("Waiting for engines to disconnect...") seconds = 5 # Hard coded for now timeout: timedelta = self._clock.utc_now() + timedelta(seconds=seconds) while True: await asyncio.sleep(0.1) if self._clock.utc_now() >= timeout: self._log.warning(f"Timed out ({seconds}s) waiting for engines to disconnect.") break if not self._data_engine.check_disconnected(): continue if not self._exec_engine.check_disconnected(): continue break # Engines initialized def _cancel_all_tasks(self) -> None: to_cancel = asyncio.tasks.all_tasks(self._loop) if not to_cancel: self._log.info("All tasks finished.") return for task in to_cancel: self._log.warning(f"Cancelling pending task {task}") task.cancel() if self._loop.is_running(): self._log.warning("Event loop still running during `cancel_all_tasks`.") return finish_all_tasks: asyncio.Future = asyncio.tasks.gather( *to_cancel, loop=self._loop, return_exceptions=True, ) self._loop.run_until_complete(finish_all_tasks) self._log.debug(f"{finish_all_tasks}") for task in to_cancel: if task.cancelled(): continue if task.exception() is not None: self._loop.call_exception_handler({ 'message': 'unhandled exception during asyncio.run() shutdown', 'exception': task.exception(), 'task': task, })
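For contrast with the config-object version, the dict-based __init__ above reads a nested configuration like the sketch below. Only keys that this __init__ and _setup_adapters actually access are shown; adapter-specific settings (credentials, instruments, etc.) are not visible in the snippet, so they are left as empty placeholder dicts, and the log-level tokens are an assumption about what LogLevelParser.from_str_py accepts.

# Example configuration dict for the dict-based TradingNode above.
# Placeholder values only; each adapter section is passed through verbatim
# to CCXTClientsFactory.create() / OandaDataClientFactory.create().
example_config = {
    "trader": {
        "name": "TESTER",             # becomes the TraderId name
        "id_tag": "001",              # becomes the TraderId tag
        "check_residuals_delay": 5.0,
    },
    "logging": {
        "log_level_console": "INF",   # level tokens assumed
        "log_level_file": "DBG",
        "log_level_store": "WRN",
        "run_in_process": True,
        "log_thread_id": False,
        "log_to_file": False,
        "log_file_path": "",
    },
    "exec_database": {
        "type": "redis",              # any other value falls back to BypassExecutionDatabase
        "host": "localhost",
        "port": 6379,
    },
    "strategy": {
        "load_state": True,
        "save_state": True,
    },
    "adapters": {
        "ccxt-binance": {},           # "ccxt-" prefix selects ccxtpro.binance
        "oanda": {},                  # execution client not yet implemented above
    },
}

This dict would be passed as TradingNode(strategies=[...], config=example_config) together with at least one TradingStrategy instance, since the constructor rejects an empty strategies list.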
def setUp(self): # Fixture Setup clock = TestClock() logger = Logger(clock) trader_id = TraderId("TESTER-000") account_id = TestStubs.account_id() self.cache = TestStubs.cache() self.portfolio = Portfolio( cache=self.cache, clock=clock, logger=logger, ) self.data_engine = DataEngine( portfolio=self.portfolio, cache=self.cache, clock=clock, logger=logger, config={"use_previous_close": False}, ) self.data_engine.process(USDJPY_SIM) self.exec_engine = ExecutionEngine( portfolio=self.portfolio, cache=self.cache, clock=clock, logger=logger, ) self.exchange = SimulatedExchange( venue=Venue("SIM"), venue_type=VenueType.ECN, oms_type=OMSType.HEDGING, account_type=AccountType.MARGIN, base_currency=USD, starting_balances=[Money(1_000_000, USD)], is_frozen_account=False, cache=self.exec_engine.cache, instruments=[USDJPY_SIM], modules=[], fill_model=FillModel(), clock=clock, logger=logger, ) self.data_client = BacktestMarketDataClient( client_id=ClientId("SIM"), engine=self.data_engine, clock=clock, logger=logger, ) self.exec_client = BacktestExecClient( exchange=self.exchange, account_id=account_id, account_type=AccountType.MARGIN, base_currency=USD, engine=self.exec_engine, clock=clock, logger=logger, ) self.risk_engine = RiskEngine( exec_engine=self.exec_engine, portfolio=self.portfolio, cache=self.cache, clock=clock, logger=logger, ) # Wire up components self.data_engine.register_client(self.data_client) self.exec_engine.register_risk_engine(self.risk_engine) self.exec_engine.register_client(self.exec_client) strategies = [ TradingStrategy("001"), TradingStrategy("002"), ] self.trader = Trader( trader_id=trader_id, strategies=strategies, portfolio=self.portfolio, data_engine=self.data_engine, risk_engine=self.risk_engine, exec_engine=self.exec_engine, clock=clock, logger=logger, )
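A first test against this fixture can simply exercise the trader lifecycle. The sketch below is an assumption-labelled example, relying on Trader exposing start(), stop() and is_running, as the TradingNode._run() and _stop() code above does.

def test_trader_starts_and_stops(self):
    # Arrange / Act
    self.trader.start()
    was_running = self.trader.is_running
    self.trader.stop()

    # Assert
    self.assertTrue(was_running)
    self.assertFalse(self.trader.is_running)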
class TraderTests(unittest.TestCase): def setUp(self): # Fixture Setup usdjpy = TestStubs.instrument_usdjpy() data = BacktestDataContainer() data.add_instrument(usdjpy) data.add_bars(usdjpy.symbol, BarStructure.MINUTE, PriceType.BID, TestDataProvider.usdjpy_1min_bid()[:2000]) data.add_bars(usdjpy.symbol, BarStructure.MINUTE, PriceType.ASK, TestDataProvider.usdjpy_1min_ask()[:2000]) clock = TestClock() guid_factory = TestGuidFactory() logger = TestLogger() trader_id = TraderId('TESTER', '000') account_id = TestStubs.account_id() data_client = BacktestDataClient(data=data, tick_capacity=100, clock=clock, logger=logger) self.portfolio = Portfolio(currency=Currency.USD, clock=clock, guid_factory=guid_factory, logger=logger) self.analyzer = PerformanceAnalyzer() self.exec_db = InMemoryExecutionDatabase(trader_id=trader_id, logger=logger) self.exec_engine = ExecutionEngine(trader_id=trader_id, account_id=account_id, database=self.exec_db, portfolio=self.portfolio, clock=clock, guid_factory=guid_factory, logger=logger) self.exec_client = BacktestExecClient( exec_engine=self.exec_engine, instruments={usdjpy.symbol: usdjpy}, config=BacktestConfig(), fill_model=FillModel(), clock=clock, guid_factory=guid_factory, logger=logger) self.exec_engine.register_client(self.exec_client) strategies = [EmptyStrategy('001'), EmptyStrategy('002')] self.trader = Trader(trader_id=trader_id, account_id=account_id, strategies=strategies, data_client=data_client, exec_engine=self.exec_engine, clock=clock, guid_factory=guid_factory, logger=logger) def test_can_initialize_trader(self): # Arrange # Act trader_id = self.trader.id # Assert self.assertEqual(TraderId('TESTER', '000'), trader_id) self.assertEqual(IdTag('000'), trader_id.order_id_tag) self.assertFalse(self.trader.is_running) self.assertEqual(0, len(self.trader.started_datetimes)) self.assertEqual(0, len(self.trader.stopped_datetimes)) self.assertEqual(2, len(self.trader.strategy_status())) def test_can_get_strategy_status(self): # Arrange # Act status = self.trader.strategy_status() # Assert self.assertTrue(StrategyId('EmptyStrategy', '001') in status) self.assertTrue(StrategyId('EmptyStrategy', '002') in status) self.assertFalse(status[StrategyId('EmptyStrategy', '001')]) self.assertFalse(status[StrategyId('EmptyStrategy', '002')]) self.assertEqual(2, len(status)) def test_can_change_strategies(self): # Arrange strategies = [EmptyStrategy('003'), EmptyStrategy('004')] # Act self.trader.initialize_strategies(strategies) # Assert self.assertTrue(strategies[0].id in self.trader.strategy_status()) self.assertTrue(strategies[1].id in self.trader.strategy_status()) self.assertEqual(2, len(self.trader.strategy_status())) def test_trader_detects_none_unique_identifiers(self): # Arrange strategies = [EmptyStrategy('000'), EmptyStrategy('000')] # Act self.assertRaises(ValueError, self.trader.initialize_strategies, strategies) def test_can_start_a_trader(self): # Arrange # Act self.trader.start() # Assert self.assertTrue(self.trader.is_running) self.assertEqual(1, len(self.trader.started_datetimes)) self.assertEqual(0, len(self.trader.stopped_datetimes)) self.assertTrue( StrategyId('EmptyStrategy', '001') in self.trader.strategy_status()) self.assertTrue( StrategyId('EmptyStrategy', '002') in self.trader.strategy_status()) self.assertTrue(self.trader.strategy_status()[StrategyId( 'EmptyStrategy', '001')]) self.assertTrue(self.trader.strategy_status()[StrategyId( 'EmptyStrategy', '002')]) def test_can_stop_a_running_trader(self): # Arrange self.trader.start() # Act 
self.trader.stop() # Assert self.assertFalse(self.trader.is_running) self.assertEqual(1, len(self.trader.started_datetimes)) self.assertEqual(1, len(self.trader.stopped_datetimes)) self.assertTrue( StrategyId('EmptyStrategy', '001') in self.trader.strategy_status()) self.assertTrue( StrategyId('EmptyStrategy', '002') in self.trader.strategy_status()) self.assertFalse(self.trader.strategy_status()[StrategyId( 'EmptyStrategy', '001')]) self.assertFalse(self.trader.strategy_status()[StrategyId( 'EmptyStrategy', '002')])