def serve_single(server_settings):
    """Run a single server process in the current process.

    Pops the ``main_start``/``main_stop`` listeners out of the settings,
    fires them around the blocking ``serve()`` call, and closes the event
    loop afterwards.

    :param server_settings: kw arguments to be passed to the serve function
    """
    main_start = server_settings.pop("main_start", None)
    main_stop = server_settings.pop("main_stop", None)

    if not server_settings.get("run_async"):
        # create new event_loop after fork
        fresh_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(fresh_loop)
        server_settings["loop"] = fresh_loop

    loop = server_settings["loop"]
    trigger_events(main_start, loop)
    serve(**server_settings)
    trigger_events(main_stop, loop)
    loop.close()
def serve_multiple(server_settings, workers):
    """Start multiple server processes simultaneously.

    Stop on interrupt and terminate signals, and drain connections when
    complete.

    :param server_settings: kw arguments to be passed to the serve function
    :param workers: number of workers to launch
    :return:
    """
    # All workers accept on the same listening socket, so the port must be
    # shareable and serve() must know it is one of several processes.
    server_settings["reuse_port"] = True
    server_settings["run_multiple"] = True

    # main_start/main_stop run once in this supervisor process only; pop
    # them so they are not forwarded to (and re-run in) each worker.
    main_start = server_settings.pop("main_start", None)
    main_stop = server_settings.pop("main_stop", None)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    trigger_events(main_start, loop)

    # Create a listening socket or use the one in settings
    sock = server_settings.get("sock")
    unix = server_settings["unix"]
    backlog = server_settings["backlog"]
    if unix:
        sock = bind_unix_socket(unix, backlog=backlog)
        server_settings["unix"] = unix
    if sock is None:
        sock = bind_socket(
            server_settings["host"], server_settings["port"], backlog=backlog
        )
    # Mark the fd inheritable so forked workers receive the same socket.
    sock.set_inheritable(True)
    server_settings["sock"] = sock
    # Clear host/port so workers use the shared socket instead of binding
    # their own.
    server_settings["host"] = None
    server_settings["port"] = None

    processes = []

    def sig_handler(signal, frame):
        # Relay the shutdown signal to every worker process as SIGTERM.
        logger.info("Received signal %s. Shutting down.", Signals(signal).name)
        for process in processes:
            os.kill(process.pid, SIGTERM)

    # Install handlers before forking so the supervisor catches both
    # SIGINT and SIGTERM for the whole group.
    signal_func(SIGINT, lambda s, f: sig_handler(s, f))
    signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
    mp = multiprocessing.get_context("fork")

    for _ in range(workers):
        process = mp.Process(
            target=serve,
            kwargs=server_settings,
        )
        process.daemon = True
        process.start()
        processes.append(process)

    for process in processes:
        process.join()

    # the above processes will block this until they're stopped
    for process in processes:
        process.terminate()

    trigger_events(main_stop, loop)

    sock.close()
    loop.close()
    remove_unix_socket(unix)
def serve(cls, primary: Optional[Sanic] = None) -> None:
    """Serve the registered applications.

    Dispatches either to the auto-reload watchdog, or to
    ``serve_single``/``serve_multiple`` depending on the configured
    worker count, and resets application state on shutdown.

    :param primary: the application that drives the server; defaults to
        the first registered application when omitted
    :raises RuntimeError: if no applications are registered, or if the
        worker count is 0
    """
    apps = list(cls._app_registry.values())

    if not primary:
        try:
            primary = apps[0]
        except IndexError:
            raise RuntimeError("Did not find any applications.")

    reloader_start = primary.listeners.get("reload_process_start")
    reloader_stop = primary.listeners.get("reload_process_stop")

    # We want to run auto_reload if ANY of the applications have it enabled
    if (
        cls.should_auto_reload()
        and os.environ.get("SANIC_SERVER_RUNNING") != "true"
    ):  # no cov
        loop = new_event_loop()
        trigger_events(reloader_start, loop, primary)
        reload_dirs: Set[Path] = primary.state.reload_dirs.union(
            *(app.state.reload_dirs for app in apps)
        )
        reloader_helpers.watchdog(1.0, reload_dirs)
        trigger_events(reloader_stop, loop, primary)
        return

    # This exists primarily for unit testing
    if not primary.state.server_info:  # no cov
        for app in apps:
            app.state.server_info.clear()
        return

    primary_server_info = primary.state.server_info[0]
    primary.before_server_start(partial(primary._start_servers, apps=apps))

    try:
        primary_server_info.stage = ServerStage.SERVING

        if primary.state.workers > 1 and os.name != "posix":  # no cov
            # Logger.warn is a deprecated alias; use warning() instead.
            logger.warning(
                f"Multiprocessing is currently not supported on {os.name},"
                " using workers=1 instead"
            )
            primary.state.workers = 1
        if primary.state.workers == 1:
            serve_single(primary_server_info.settings)
        elif primary.state.workers == 0:
            raise RuntimeError("Cannot serve with no workers")
        else:
            serve_multiple(
                primary_server_info.settings, primary.state.workers
            )
    except BaseException:
        error_logger.exception(
            "Experienced exception while trying to serve"
        )
        raise
    finally:
        # Always restore application state, even on error paths.
        primary_server_info.stage = ServerStage.STOPPED
        logger.info("Server Stopped")
        for app in apps:
            app.state.server_info.clear()
            app.router.reset()
            app.signal_router.reset()