def start_server(self, consume_rpcs=True):
    """Bring the server up.

    Must be invoked from within the main thread.
    """
    # An event loop has to exist before janus.Queue is constructed
    get_event_loop()

    self._server_tasks = set()
    self._server_shutdown_queue = janus.Queue()

    async def _watch_for_shutdown():
        # Wait until a shutdown is requested via the queue, record the
        # requested exit code, then stop the running event loop.
        code = await self._server_shutdown_queue.async_q.get()
        self.exit_code = code
        self.loop.stop()
        self._server_shutdown_queue.async_q.task_done()

    task = asyncio.ensure_future(_watch_for_shutdown())
    # Die hard if the shutdown monitor itself blows up
    task.add_done_callback(make_exception_checker(self, die=True))
    self._shutdown_monitor_task = task

    block(self._start_server_inner())
def new_thread_setup():
    # Make sure each thread has an event loop available, even if it
    # isn't actually running (stops janus.Queue barfing)
    get_event_loop()
    # `fn` is captured from the enclosing scope (not visible in this
    # chunk); execute it and hand its result back to the caller
    return fn()
def create(
    config: Union[dict, RootConfig] = None,
    *,
    config_file: str = None,
    service_name: str = None,
    process_name: str = None,
    features: List[Union[Feature, str]] = ALL_FEATURES,
    client_class: Type[BusClient] = BusClient,
    node_class: Type[BusPath] = BusPath,
    plugins=None,
    flask: bool = False,
    **kwargs,
) -> BusPath:
    """
    Create a new bus instance which can be used to access the bus.

    Typically this will be used as follows:

        import lightbus

        bus = lightbus.create()

    This will be a `BusPath` instance. If you wish to access the lower
    level `BusClient` you can do so via `bus.client`.

    Args:
        config (dict, Config): The config object or dictionary to load
        config_file (str): The path to a config file to load (should end in .json or .yaml)
        service_name (str): The name of this service - will be used when creating event
                            consumer groups
        process_name (str): The unique name of this process - used when retrieving
                            unprocessed events following a crash
        features (list): The features to enable on the client (defaults to all features)
        client_class (Type[BusClient]): The class from which the bus client will be instantiated
        node_class (BusPath): The class from which the bus path will be instantiated
        plugins (list): A list of plugin instances to load
        flask (bool): Are we using flask? If so we will make sure we don't start lightbus
                      in the reloader process
        **kwargs (): Any additional instantiation arguments to be passed to `client_class`.

    Returns:
        BusPath. Note: returns None (despite the annotation) when `flask=True`
        and we detect the Flask reloader parent process — see below.
    """
    if flask:
        in_flask_server = sys.argv[0].endswith("flask") and "run" in sys.argv
        if in_flask_server and os.environ.get("WERKZEUG_RUN_MAIN", "").lower() != "true":
            # Flask has a reloader process that shouldn't start a lightbus client
            return

    # Ensure an event loop exists, as creating InternalQueue
    # objects requires that we have one.
    get_event_loop()

    # If were are running via the Lightbus CLI then we may have
    # some command line arguments we need to apply.
    # pylint: disable=cyclic-import,import-outside-toplevel
    from lightbus.commands import COMMAND_PARSED_ARGS

    # CLI-provided values take precedence over the keyword arguments
    config_file = COMMAND_PARSED_ARGS.get("config_file", None) or config_file
    service_name = COMMAND_PARSED_ARGS.get("service_name", None) or service_name
    process_name = COMMAND_PARSED_ARGS.get("process_name", None) or process_name

    if config is None:
        config = load_config(
            from_file=config_file, service_name=service_name, process_name=process_name
        )

    # Normalise whatever we were given into a Config instance
    if isinstance(config, Mapping):
        config = Config.load_dict(config or {})
    elif isinstance(config, RootConfig):
        config = Config(config)

    # Allow a pre-built transport registry to be injected via kwargs
    transport_registry = kwargs.pop("transport_registry", None) or TransportRegistry().load_config(
        config
    )

    schema = Schema(
        schema_transport=transport_registry.get_schema_transport(),
        max_age_seconds=config.bus().schema.ttl,
        human_readable=config.bus().schema.human_readable,
    )

    # Shared queue onto which background tasks push fatal errors
    error_queue: ErrorQueueType = InternalQueue()

    # Plugin registry
    plugin_registry = PluginRegistry()
    if plugins is None:
        logger.debug("Auto-loading any installed Lightbus plugins...")
        plugin_registry.autoload_plugins(config)
    else:
        logger.debug("Loading explicitly specified Lightbus plugins....")
        plugin_registry.set_plugins(plugins)

    # Hook registry
    hook_registry = HookRegistry(
        error_queue=error_queue, execute_plugin_hooks=plugin_registry.execute_hook
    )

    # API registry
    api_registry = ApiRegistry()
    api_registry.add(LightbusStateApi())
    api_registry.add(LightbusMetricsApi())

    # Event client & dock communicate via a pair of internal queues,
    # one for each direction
    events_queue_client_to_dock = InternalQueue()
    events_queue_dock_to_client = InternalQueue()

    event_client = EventClient(
        api_registry=api_registry,
        hook_registry=hook_registry,
        config=config,
        schema=schema,
        error_queue=error_queue,
        consume_from=events_queue_dock_to_client,
        produce_to=events_queue_client_to_dock,
    )

    event_dock = EventDock(
        transport_registry=transport_registry,
        api_registry=api_registry,
        config=config,
        error_queue=error_queue,
        consume_from=events_queue_client_to_dock,
        produce_to=events_queue_dock_to_client,
    )

    # Same queue-pair wiring for the RPC/result client & dock
    rpcs_queue_client_to_dock = InternalQueue()
    rpcs_queue_dock_to_client = InternalQueue()

    rpc_result_client = RpcResultClient(
        api_registry=api_registry,
        hook_registry=hook_registry,
        config=config,
        schema=schema,
        error_queue=error_queue,
        consume_from=rpcs_queue_dock_to_client,
        produce_to=rpcs_queue_client_to_dock,
    )

    rpc_result_dock = RpcResultDock(
        transport_registry=transport_registry,
        api_registry=api_registry,
        config=config,
        error_queue=error_queue,
        consume_from=rpcs_queue_client_to_dock,
        produce_to=rpcs_queue_dock_to_client,
    )

    client = client_class(
        config=config,
        hook_registry=hook_registry,
        plugin_registry=plugin_registry,
        features=features,
        schema=schema,
        api_registry=api_registry,
        event_client=event_client,
        rpc_result_client=rpc_result_client,
        error_queue=error_queue,
        transport_registry=transport_registry,
        **kwargs,
    )

    # Pass the client to any hooks
    # (use a weakref to prevent circular references)
    hook_registry.set_extra_parameter("client", weakref.proxy(client))

    # We don't do this normally as the docks do not need to be
    # accessed directly, but this is useful in testing
    # TODO: Testing flag removed, but these are only needed in testing.
    #       Perhaps wrap them up in a way that makes this obvious
    client.event_dock = event_dock
    client.rpc_result_dock = rpc_result_dock

    log_welcome_message(
        logger=logger,
        transport_registry=transport_registry,
        schema=schema,
        plugin_registry=plugin_registry,
        config=config,
    )

    return node_class(name="", parent=None, client=client)
async def start_worker(self):
    """Worker startup procedure

    Starts the error monitor, pushes registered APIs into the schema,
    performs lazy loading, runs startup hooks, and then — depending on
    the enabled features — starts RPC consumption, registered event
    listeners, and background tasks. Long-running tasks are tracked in
    ``self._worker_tasks``.
    """
    # Ensure an event loop exists
    get_event_loop()

    self._worker_tasks = set()

    # Start monitoring for errors on the error queue
    error_monitor_task = asyncio.ensure_future(self.error_monitor())
    self._error_monitor_task = error_monitor_task
    self._worker_tasks.add(self._error_monitor_task)

    # Features setup & logging
    if not self.api_registry.all() and Feature.RPCS in self.features:
        logger.info("Disabling serving of RPCs as no APIs have been registered")
        self.features.remove(Feature.RPCS)

    logger.info(
        LBullets(
            f"Enabled features ({len(self.features)})", items=[f.value for f in self.features]
        )
    )

    disabled_features = set(ALL_FEATURES) - set(self.features)
    logger.info(
        LBullets(
            f"Disabled features ({len(disabled_features)})",
            items=[f.value for f in disabled_features],
        )
    )

    # Api logging
    logger.info(
        LBullets(
            "APIs in registry ({})".format(len(self.api_registry.all())),
            items=self.api_registry.names(),
        )
    )

    # Push all registered APIs into the global schema
    for api in self.api_registry.all():
        await self.schema.add_api(api)

    # We're running as a worker now (e.g. lightbus run), so
    # do the lazy loading immediately
    await self.lazy_load_now()

    # Setup schema monitoring
    monitor_task = asyncio.ensure_future(
        queue_exception_checker(self.schema.monitor(), self.error_queue)
    )

    logger.info("Executing before_worker_start & on_start hooks...")
    await self.hook_registry.execute("before_worker_start")
    logger.info("Execution of before_worker_start & on_start hooks was successful")

    # Setup RPC consumption
    if Feature.RPCS in self.features:
        consume_rpc_task = asyncio.ensure_future(
            queue_exception_checker(self.consume_rpcs(), self.error_queue)
        )
    else:
        consume_rpc_task = None

    # Start off any registered event listeners
    if Feature.EVENTS in self.features:
        await self.event_client.start_registered_listeners()

    # Start off any background tasks
    if Feature.TASKS in self.features:
        for coroutine in self._background_coroutines:
            task = asyncio.ensure_future(queue_exception_checker(coroutine, self.error_queue))
            self._background_tasks.append(task)

    # Fix: previously None was added to _worker_tasks when RPC serving was
    # disabled. The set otherwise holds only asyncio tasks, so a None member
    # would break any later cancel/await sweep over the set.
    if consume_rpc_task is not None:
        self._worker_tasks.add(consume_rpc_task)
    self._worker_tasks.add(monitor_task)
def loop(self):
    """Return the event loop this object should use."""
    # Delegate to the shared helper so every caller resolves the loop
    # the same way
    current_loop = get_event_loop()
    return current_loop