async def init_site(self, site_name):
    """(Re)start the aiohttp site stored under attribute *site_name*.

    Stops any previously running site with the same name, then builds
    either an HTTPS endpoint (with TLS context and a certificate-watcher
    task) or a plain HTTP endpoint, depending on *site_name*.  Returns
    early without starting anything when the matching port option is
    absent from the config.
    """
    # Stop a previously started site before replacing it.
    existing = getattr(self, site_name, None)
    if existing:
        await existing.stop()
    if site_name == "https_site":
        if not self.config.has_option("global", "https_port"):
            return
        port = self.config.getint("global", "https_port")
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        # BUG FIX: the original read certfile/keyfile from a bare `config`
        # name, but every other option in this method comes from
        # `self.config` — use the instance config consistently.
        ssl_context.load_cert_chain(
            certfile=self.config.get("global", "certfile"),
            keyfile=self.config.get("global", "keyfile"))
        if self.cert_watcher is None:
            # Watch cert files so the site can be refreshed on rotation.
            # NOTE(review): `loop` is not defined in this method — presumably
            # a module-level event loop; confirm it exists at module scope.
            self.cert_watcher = loop.create_task(self.watch_cert())
    else:
        if not self.config.has_option("global", "http_port"):
            return
        port = self.config.getint("global", "http_port")
        ssl_context = None
    log(__name__).info("Starting fresh site %s on port %d", site_name, port)
    site = TCPSite(self.runner, self.config.get("global", "host"), port,
                   ssl_context=ssl_context, reuse_address=True)
    setattr(self, site_name, site)
    await site.start()
async def run_server(self):
    """Serve ``self.serve_image`` at ``/`` on all interfaces, port 8080."""
    application = web.Application()
    application.add_routes([web.get('/', self.serve_image)])
    app_runner = AppRunner(application)
    await app_runner.setup()
    endpoint = TCPSite(app_runner, "0.0.0.0", 8080)
    await endpoint.start()
async def async_main(loop: asyncio.AbstractEventLoop) -> None:
    """Walk through the JIRA OAuth flow interactively.

    Serves the OAuth callback endpoint locally, obtains a request token,
    prompts the user to authorize it, then exchanges it for an access
    token and verifies access.
    """
    oauth = JiraOAuth.from_file()
    oauth.app = Application()
    oauth.app.add_routes([web.get('/', oauth.process_oauth_result)])
    runner = AppRunner(app=oauth.app)
    await runner.setup()
    # Default host/port (localhost:8080) — must match the redirect URL below.
    await TCPSite(runner=runner).start()
    oauth.redirect_url = 'http://localhost:8080/'
    oauth.loop = loop
    await oauth.generate_request_token_and_auth_url()
    console = JiraOAuthConsole(jira_oauth=oauth)
    print(
        f"Request Token: oauth_token={oauth.request_token['oauth_token']}, "
        f"oauth_token_secret={oauth.request_token['oauth_token_secret']}")
    print()
    console.print_url()
    await oauth.generate_access_token()
    print()
    print(
        f"Access Token: oauth_token={oauth.access_token['oauth_token']}, "
        f"oauth_token_secret={oauth.access_token['oauth_token_secret']}")
    print(
        "You may now access protected resources using the access tokens above."
    )
    print()
    await console.check_access_token()
async def start(self):
    """Serve the built static assets over HTTP on ``self.port``."""
    application, *_ = serve_static(static_path=str(self.config.dist_dir),
                                   port=self.port)
    self.runner = AppRunner(application, access_log=None)
    await self.runner.setup()
    endpoint = TCPSite(self.runner, HOST, self.port, shutdown_timeout=0.01)
    await endpoint.start()
def run_app(app, port, loop):
    """Serve *app* on *port* until interrupted, then clean up the runner."""
    runner = AppRunner(app, access_log=None)
    loop.run_until_complete(runner.setup())
    loop.run_until_complete(
        TCPSite(runner, HOST, port, shutdown_timeout=0.01).start())
    try:
        loop.run_forever()
    except KeyboardInterrupt:  # pragma: no branch
        pass
    finally:
        logger.info('shutting down server...')
        shutdown_began = loop.time()
        # Best-effort cleanup: don't let a second Ctrl-C or a timeout
        # escape past the shutdown path.
        with contextlib.suppress(asyncio.TimeoutError, KeyboardInterrupt):
            loop.run_until_complete(runner.cleanup())
        logger.debug('shutdown took %0.2fs', loop.time() - shutdown_began)
async def websocket_server(loop, free_port):
    """Async fixture: run the websocket app on localhost, yield its URL."""
    app_runner = AppRunner(websocket_application)
    await app_runner.setup()
    site = TCPSite(app_runner, 'localhost', free_port, shutdown_timeout=2)
    await site.start()
    # Hand the server's URL to the test; resume here for teardown.
    yield site.name
    await app_runner.shutdown()
    await app_runner.cleanup()
async def setup(self):
    """Create, start, and remember the TCP endpoint for the DOH handler."""
    await self.runner.setup()
    # Bind on all interfaces (host=None) with socket reuse enabled so the
    # service can be restarted quickly.
    self.site = TCPSite(
        self.runner,
        None,
        self.port,
        ssl_context=self.ssl,
        shutdown_timeout=SHUTDOWN_TIMEOUT,
        backlog=BACKLOG,
        reuse_address=True,
        reuse_port=True,
    )
    await self.site.start()
    log(__name__).info("DOH_Handler started")
async def prepare(self) -> None:
    """Prepare handler and runner, then construct (but not start) sites."""
    await self.handler.prepare()
    await self.runner.setup()
    # Sites are only built here; starting them is left to the caller.
    self.sites = [
        TCPSite(
            self.runner,
            self.host,
            self.port,
            shutdown_timeout=self.shutdown_timeout,
            ssl_context=self.ssl_context,
            backlog=self.backlog,
            reuse_address=self.reuse_address,
            reuse_port=self.reuse_port,
        )
    ]
async def main(port=8000):
    """Run the LINE bot webhook server forever on *port*."""
    session = aiohttp.ClientSession()
    http_client = AiohttpAsyncHttpClient(session)
    bot_api = AsyncLineBotApi(channel_access_token, http_client)
    webhook_parser = WebhookParser(channel_secret)
    handler = Handler(bot_api, webhook_parser)
    app = web.Application()
    app.add_routes([web.post('/callback', handler.echo)])
    runner = web.AppRunner(app)
    await runner.setup()
    await TCPSite(runner=runner, port=port).start()
    # Keep this coroutine alive; the server runs in the background.
    while True:
        await asyncio.sleep(3600)
async def _start_static_server(self):
    """Bind the static-file app to a free port and start serving it."""
    self._port = find_free_port()
    self._runner = AppRunner(
        self._app,
        handle_signals=True,
        access_log_class=AccessLogger,
        access_log_format=AccessLogger.LOG_FORMAT,
        access_log=logging.getLogger(__name__),
    )
    await self._runner.setup()
    # Port only: aiohttp binds to the default host.
    self._site = TCPSite(self._runner, port=self._port)
    await self._site.start()
async def run_app(self, app, host, port):
    """Set up *app* and start a single TCP endpoint on host:port."""
    self.runner = AppRunner(app)
    await self.runner.setup()
    log(__name__).info("Created HTTP endpoint %s:%d", host, port)
    endpoints = [
        TCPSite(self.runner,
                host,
                port,
                shutdown_timeout=SHUTDOWN_TIMEOUT,
                backlog=BACKLOG,
                reuse_address=True,
                reuse_port=True)
    ]
    for endpoint in endpoints:
        await endpoint.start()
async def start_server() -> Iterable[None]:
    """Run the JSON-RPC transport server for the duration of the ``yield``."""
    rpc_handler = functools.partial(handle_rpc,
                                    serializer=JsonSerializer(),
                                    bstream=SimpleBlockStream())
    transport = AIOHttpTransportServer(process_request=rpc_handler,
                                       ip=server_addr,
                                       port=PORT,
                                       ssl_cert=SSL_CERT,
                                       ssl_key=SSL_KEY,
                                       api_key_enc=API_KEY_ENC,
                                       settings={"serializer": "json",
                                                 "bstream": "simple"})
    runner = AppRunner(transport.make_app())
    await runner.setup()
    endpoint = TCPSite(runner, server_addr, PORT,
                       ssl_context=transport.make_ssl_context())
    await endpoint.start()
    try:
        yield
    finally:
        # Always tear the runner down, even if the consumer raised.
        await runner.cleanup()
async def run_app(self, app: Application, host: str = "0.0.0.0", port: int = 7070) -> None:
    """Runs application and blocks until stopped.

    Args:
        app: Application instance.
        host: TCP/IP hostname to serve on.
        port: TCP/IP port to serve on.
    """
    _LOG.info("Starting app", extra={"host": host, "port": port})
    runner = AppRunner(app=app)
    await runner.setup()
    await TCPSite(runner=runner, host=host, port=port).start()
    # Block here until some other coroutine sets the stop event.
    await self._stopped.wait()
    _LOG.debug("Stopping app", extra={"host": host, "port": port})
    await app["scheduler"].close()
    await runner.cleanup()
async def run_app(
    context: BeanContext,
    app: Union[Application, Awaitable[Application]],
    *,
    host: Optional[str] = None,
    port: Optional[int] = None,
    path: Optional[str] = None,
    sock: Optional[socket.socket] = None,
    shutdown_timeout: float = 60.0,
    ssl_context: Optional[SSLContext] = None,
    print: Callable[..., None] = print,
    backlog: int = 128,
    access_log_class: Type[AbstractAccessLogger] = AccessLogger,
    access_log_format: str = AccessLogger.LOG_FORMAT,
    access_log: Optional[logging.Logger] = access_logger,
    handle_signals: bool = True,
    reuse_address: Optional[bool] = None,
    reuse_port: Optional[bool] = None
) -> None:
    """Run *app* on every requested endpoint until a stop signal arrives.

    Mirrors ``aiohttp.web.run_app`` but integrates with a ``BeanContext``:
    the context supplies the event loop and registers the SIGINT/SIGTERM
    handlers that set the internal stop event.

    Endpoint selection follows aiohttp's rules: TCP sites for *host*/*port*,
    Unix sites for *path*, socket sites for *sock*; a default TCP site is
    created when neither *path* nor *sock* is given.
    """
    loop = context.loop
    # NOTE(review): the explicit ``loop=`` argument to asyncio.Event was
    # deprecated in Python 3.8 and removed in 3.10 — confirm target version.
    stop_event = asyncio.Event(loop=loop)

    def stop(**kwargs):
        # Signal callback: wake the waiter at the bottom of this function.
        stop_event.set()

    for sig in ("SIGINT", "SIGTERM"):
        await context.add_signal_handler(sig, stop)
    # Resolve the application if it was supplied as a coroutine.
    if asyncio.iscoroutine(app):
        app = await app  # type: ignore
    app = cast(Application, app)
    runner = AppRunner(
        app,
        handle_signals=handle_signals,
        access_log_class=access_log_class,
        access_log_format=access_log_format,
        access_log=access_log,
    )
    await runner.setup()
    sites = []  # type: List[BaseSite]
    try:
        if host is not None:
            if isinstance(host, (str, bytes, bytearray, memoryview)):
                # Single hostname: one TCP site.
                sites.append(
                    TCPSite(
                        runner,
                        host,
                        port,
                        shutdown_timeout=shutdown_timeout,
                        ssl_context=ssl_context,
                        backlog=backlog,
                        reuse_address=reuse_address,
                        reuse_port=reuse_port,
                    )
                )
            else:
                # A collection of hostnames: one TCP site each.
                for h in host:
                    sites.append(
                        TCPSite(
                            runner,
                            h,
                            port,
                            shutdown_timeout=shutdown_timeout,
                            ssl_context=ssl_context,
                            backlog=backlog,
                            reuse_address=reuse_address,
                            reuse_port=reuse_port,
                        )
                    )
        elif path is None and sock is None or port is not None:
            # No explicit endpoint (or only a port given): default TCP site.
            sites.append(
                TCPSite(
                    runner,
                    port=port,
                    shutdown_timeout=shutdown_timeout,
                    ssl_context=ssl_context,
                    backlog=backlog,
                    reuse_address=reuse_address,
                    reuse_port=reuse_port,
                )
            )
        if path is not None:
            if isinstance(path, (str, bytes, bytearray, memoryview)):
                sites.append(
                    UnixSite(
                        runner,
                        path,
                        shutdown_timeout=shutdown_timeout,
                        ssl_context=ssl_context,
                        backlog=backlog,
                    )
                )
            else:
                for p in path:
                    sites.append(
                        UnixSite(
                            runner,
                            p,
                            shutdown_timeout=shutdown_timeout,
                            ssl_context=ssl_context,
                            backlog=backlog,
                        )
                    )
        if sock is not None:
            if not isinstance(sock, Iterable):
                sites.append(
                    SockSite(
                        runner,
                        sock,
                        shutdown_timeout=shutdown_timeout,
                        ssl_context=ssl_context,
                        backlog=backlog,
                    )
                )
            else:
                for s in sock:
                    sites.append(
                        SockSite(
                            runner,
                            s,
                            shutdown_timeout=shutdown_timeout,
                            ssl_context=ssl_context,
                            backlog=backlog,
                        )
                    )
        for site in sites:
            await site.start()
        # Expose the bean context on the application for downstream use.
        app.aio_pod_context = context
        if print:  # pragma: no branch
            names = sorted(str(s.name) for s in runner.sites)
            print("======== Running on {} ========\n".format(", ".join(names)))
        # Block until a signal handler sets the stop event.
        await stop_event.wait()
    finally:
        await runner.cleanup()
async def run(self):
    """Start serving ``self.app`` on ``self.port`` with signal handling."""
    app_runner = AppRunner(self.app, handle_signals=True)
    await app_runner.setup()
    endpoint = TCPSite(app_runner, port=self.port, shutdown_timeout=60.0)
    await endpoint.start()
async def start_server(handler, host, port):
    """Wrap *handler* in a low-level Server and start it on host:port."""
    runner = ServerRunner(Server(handler))
    await runner.setup()
    await TCPSite(runner, host, port).start()
def main():
    """Entry point: parse CLI options, start the UDP status protocol and
    the background broadcast/re-balance/auto-save tasks, then serve the
    web application until interrupted."""
    parser = argparse.ArgumentParser(
        description='Distributed Cronlike Scheduler')
    parser.add_argument('-l', '--log-file', default=None,
                        help='path to store logfile')
    parser.add_argument('-p', '--storage-path', default=None,
                        help='directory where to store cache')
    parser.add_argument('-u', '--udp-communication-port', type=int,
                        default=12345,
                        help='communication port (default: 12345)')
    parser.add_argument('-i', '--broadcast-interval', type=int, default=5,
                        help='interval for broadcasting data over UDP')
    parser.add_argument(
        '-c', '--cron', default=None,
        help=
        'crontab to use (default: /etc/crontab, use `memory` to not save to file'
    )
    parser.add_argument('-d', '--cron-user', default=None,
                        help='user for storing cron entries')
    parser.add_argument('-w', '--web-port', type=int, default=8080,
                        help='web hosting port (default: 8080)')
    parser.add_argument(
        '-n', '--ntp-server', default='pool.ntp.org',
        help='NTP server to detect clock skew (default: pool.ntp.org)')
    parser.add_argument(
        '-s', '--node-staleness', type=int, default=180,
        help=
        'Time in seconds of non-communication for a node to be marked as stale (defailt: 180s)'
    )
    parser.add_argument(
        '-x', '--hash-key', default='abracadabra',
        help="String to use for verifying UDP traffic (to disable use '')")
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='verbose logging')
    args = parser.parse_args()

    # Refuse to run with a badly skewed clock: scheduling depends on it.
    if get_ntp_offset(args.ntp_server) > 60:
        exit("your clock is not in sync (check system NTP settings)")

    # Root-logger configuration: optional file handler plus verbosity level.
    root_logger = logging.getLogger()
    if args.log_file:
        file_handler = logging.FileHandler(args.log_file)
        file_handler.setFormatter(logging.Formatter(log_format))
        root_logger.addHandler(file_handler)
    if args.verbose:
        root_logger.setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(logging.INFO)
    logging.getLogger('aiohttp').setLevel(logging.WARNING)

    # Thread pool for the blocking broadcast/re-balance loops below.
    pool = ThreadPoolExecutor(4)
    storage = Storage(args.storage_path)

    # Build the job processor from the chosen crontab source.
    if args.cron:
        if args.cron == 'memory':
            # In-memory crontab only; nothing is persisted to a tab file.
            processor = Processor(args.udp_communication_port,
                                  storage,
                                  cron=CronTab(tab="""* * * * * command"""))
        elif args.cron_user:
            processor = Processor(args.udp_communication_port,
                                  storage,
                                  cron=CronTab(tabfile=args.cron,
                                               user=args.cron_user),
                                  user=args.cron_user)
        else:
            processor = Processor(args.udp_communication_port,
                                  storage,
                                  cron=CronTab(tabfile=args.cron,
                                               user='******'),
                                  user='******')
    else:
        processor = Processor(args.udp_communication_port,
                              storage,
                              user='******')

    # An empty hash key disables UDP traffic verification.
    hash_key = None
    if args.hash_key != '':
        hash_key = args.hash_key

    # StatusProtocolServer yields the event loop it runs the protocol on.
    with StatusProtocolServer(processor, args.udp_communication_port) as loop:

        # Shared flag read by the worker threads below; cleared on shutdown.
        running = True

        scheduler = Scheduler(storage, args.node_staleness)

        def timed_broadcast():
            """ periodically broadcast system status and known jobs """
            while running:
                broadcast(
                    args.udp_communication_port,
                    UdpSerializer.dump(Status(get_ip(), get_load()),
                                       hash_key))
                for job in storage.cluster_jobs:
                    if job.assigned_to == get_ip():
                        job.pid = check_process(job.command)
                    for packet in UdpSerializer.dump(job, hash_key):
                        client(args.udp_communication_port, packet)
                time.sleep(args.broadcast_interval)

        def timed_schedule():
            """ periodically check if cluster needs re-balancing """
            while running:
                time.sleep(23)
                if not scheduler.check_cluster_state():
                    logger.info("re-balancing cluster")
                    jobs = storage.cluster_jobs.copy()
                    # Announce the re-balance, give nodes time to react,
                    # then re-broadcast every known job.
                    for packet in UdpSerializer.dump(
                            ReBalance(timestamp=datetime.now()), hash_key):
                        client(args.udp_communication_port, packet)
                    time.sleep(5)
                    for job in jobs:
                        for packet in UdpSerializer.dump(job, hash_key):
                            client(args.udp_communication_port, packet)

        async def scheduled_broadcast():
            # Run the blocking broadcast loop on the thread pool.
            await loop.run_in_executor(pool, timed_broadcast)

        async def scheduled_rebalance():
            # Run the blocking re-balance loop on the thread pool.
            await loop.run_in_executor(pool, timed_schedule)

        async def save_schedule():
            """ auto save every 100 seconds """
            while running:
                await asyncio.sleep(100)
                await storage.save()

        logger.info("setting broadcast interval to {0} seconds".format(
            args.broadcast_interval))
        loop.create_task(scheduled_broadcast())
        loop.create_task(scheduled_rebalance())
        if args.storage_path:
            loop.create_task(save_schedule())

        logger.info(
            "starting web application server on http://{0}:{1}/".format(
                get_ip(), args.web_port))

        if args.cron_user:
            s = Site(scheduler,
                     storage,
                     args.udp_communication_port,
                     cron=processor.cron,
                     user=args.cron_user,
                     hash_key=hash_key)
        else:
            s = Site(scheduler,
                     storage,
                     args.udp_communication_port,
                     cron=processor.cron,
                     hash_key=hash_key)

        runner = AppRunner(s.app)
        loop.run_until_complete(runner.setup())
        site_instance = TCPSite(runner, port=args.web_port)
        loop.run_until_complete(site_instance.start())

        try:
            loop.run_forever()
        # NOTE(review): bare except deliberately catches KeyboardInterrupt
        # (and everything else) to fall through to the shutdown path.
        except:
            logger.info("interrupt received")

        logger.info("stopping web application")
        loop.run_until_complete(site_instance.stop())

        running = False

        if args.storage_path:
            loop.create_task(storage.save())

        logger.debug("waiting for background tasks to finish")
        # NOTE(review): asyncio.Task.all_tasks() was removed in Python 3.9;
        # asyncio.all_tasks() is the modern spelling — confirm target version.
        pending_tasks = [
            task for task in asyncio.Task.all_tasks() if not task.done()
        ]
        loop.run_until_complete(asyncio.gather(*pending_tasks))

        logger.info("elvis has left the building")
def prepare_app(app, *, host=None, port=None, path=None, sock=None,
                shutdown_timeout=60.0, ssl_context=None, backlog=128,
                access_log_class=helpers.AccessLogger,
                access_log_format=helpers.AccessLogger.LOG_FORMAT,
                access_log=access_logger, handle_signals=True,
                reuse_address=None, reuse_port=None):
    """
    Slightly modified version of aiohttp.web.run_app, where the server is
    not really started, but the prepared runner is returned.  This allows
    the caller to run multiple apps at once.

    Endpoint selection follows aiohttp's rules: TCP sites for host/port,
    Unix sites for path, socket sites for sock; a default TCP site is
    created when neither path nor sock is given.  The constructed sites
    are attached to the runner as ``runner.prepared_sites``.
    """
    loop = asyncio.get_event_loop()
    # Resolve the application if it was supplied as a coroutine.
    if asyncio.iscoroutine(app):
        app = loop.run_until_complete(app)
    runner = AppRunner(app,
                       handle_signals=handle_signals,
                       access_log_class=access_log_class,
                       access_log_format=access_log_format,
                       access_log=access_log)
    loop.run_until_complete(runner.setup())
    sites = []
    if host is not None:
        if isinstance(host, (str, bytes, bytearray, memoryview)):
            # Single hostname: one TCP site.
            sites.append(
                TCPSite(runner,
                        host,
                        port,
                        shutdown_timeout=shutdown_timeout,
                        ssl_context=ssl_context,
                        backlog=backlog,
                        reuse_address=reuse_address,
                        reuse_port=reuse_port))
        else:
            # A collection of hostnames: one TCP site each.
            for h in host:
                sites.append(
                    TCPSite(runner,
                            h,
                            port,
                            shutdown_timeout=shutdown_timeout,
                            ssl_context=ssl_context,
                            backlog=backlog,
                            reuse_address=reuse_address,
                            reuse_port=reuse_port))
    elif path is None and sock is None or port is not None:
        # No explicit endpoint (or only a port given): default TCP site.
        sites.append(
            TCPSite(runner,
                    port=port,
                    shutdown_timeout=shutdown_timeout,
                    ssl_context=ssl_context,
                    backlog=backlog,
                    reuse_address=reuse_address,
                    reuse_port=reuse_port))
    if path is not None:
        if isinstance(path, (str, bytes, bytearray, memoryview)):
            sites.append(
                UnixSite(runner,
                         path,
                         shutdown_timeout=shutdown_timeout,
                         ssl_context=ssl_context,
                         backlog=backlog))
        else:
            for p in path:
                sites.append(
                    UnixSite(runner,
                             p,
                             shutdown_timeout=shutdown_timeout,
                             ssl_context=ssl_context,
                             backlog=backlog))
    if sock is not None:
        if not isinstance(sock, Iterable):
            sites.append(
                SockSite(runner,
                         sock,
                         shutdown_timeout=shutdown_timeout,
                         ssl_context=ssl_context,
                         backlog=backlog))
        else:
            for s in sock:
                sites.append(
                    SockSite(runner,
                             s,
                             shutdown_timeout=shutdown_timeout,
                             ssl_context=ssl_context,
                             backlog=backlog))
    # Attach the unstarted sites so the caller can start them later.
    runner.prepared_sites = sites
    return runner