def main():
    """Creates scheduler, fills it up with tasks and runs it"""
    init_logging()
    LOGGER.info("Opening port [%s] for prometheus", PROMETHEUS_PORT)
    loop = asyncio.get_event_loop()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT), LOGGER, loop)
    loop.run_until_complete(status_site.start())
    loop.run_until_complete(a_ensure_minimal_schema_version())

    # Install the shared terminate handler for the usual shutdown signals.
    for sig in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, terminate)

    # Register every configured "name:interval" job with the scheduler.
    for job_info in JOBS:
        job_name, interval = job_info.split(':')
        job = import_job(job_name)
        if not job:
            LOGGER.error('Couldn\'t find job data for job: %s', job_name)
            continue
        run_on_startup = job_name in JOBS_STARTUP
        # Startup jobs fire immediately; the rest wait for their first interval.
        next_run_time = datetime.now() if run_on_startup else undefined
        LOGGER.info('Adding job: %s, cadence each %s minutes, run on startup: %s',
                    job_name, interval, run_on_startup)
        SCHEDULER.add_job(job.run, IntervalTrigger(minutes=int(interval)),
                          id=job_name, next_run_time=next_run_time)

    SCHEDULER.start()
    MAIN_LOOP.start()
    LOGGER.info("Stopped.")
def main():  # pylint: disable=too-many-statements
    """Main kafka listener entrypoint."""
    init_logging()
    loop = asyncio.get_event_loop()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT), LOGGER, loop)
    loop.run_until_complete(status_site.start())
    loop.run_until_complete(a_ensure_minimal_schema_version())
    LOGGER.info("Starting upload listener.")

    # Route shutdown signals through the loop; the sig=sig default pins the
    # current signal value into each lambda (avoids late-binding capture).
    for sig in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT):
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))

    ListenerCtx.set_listener_ctx()
    with DatabasePool(WORKER_THREADS):
        # prepare repo name to id cache
        db_init_repo_cache()
        LISTENER_QUEUE.listen(process_message)
        # wait until loop is stopped from terminate callback
        loop.run_forever()
        LOGGER.info("Shutting down.")
        ListenerCtx.executor.shutdown()
def main():
    """Main VMaaS listener entrypoint."""
    init_logging()
    loop = asyncio.get_event_loop()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(
        status_app,
        int(CFG.prometheus_port or CFG.vmaas_sync_prometheus_port),
        LOGGER,
        loop,
    )
    loop.run_until_complete(status_site.start())
    loop.run_until_complete(a_ensure_minimal_schema_version())
    LOGGER.info("Starting VMaaS sync service.")

    with DatabasePool(1):
        app_cont = VmaasSyncContext()

        def terminate(*_):
            """Trigger shutdown."""
            LOGGER.info("Signal received, stopping application.")
            # FIX: add_callback_from_signal is a Tornado IOLoop method and does
            # not exist on the asyncio loop returned by asyncio.get_event_loop(),
            # so the old handler raised AttributeError when a signal arrived.
            # call_soon_threadsafe is the asyncio signal-safe scheduler, and
            # app.shutdown() is a coroutine so it must be wrapped in a task.
            loop.call_soon_threadsafe(asyncio.ensure_future, app_cont.app.shutdown())

        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            signal.signal(sig, terminate)

        web.run_app(app_cont.app, port=CFG.private_port)
        LOGGER.info("Shutting down.")
def main():
    """Application entrypoint"""
    init_logging()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT), LOGGER, MAIN_LOOP)

    # One-time async setup steps, executed sequentially before the listener starts.
    for setup_coro in (status_site.start(),
                       a_ensure_minimal_schema_version(),
                       setup_db_pool(),
                       db_init_caches()):
        MAIN_LOOP.run_until_complete(setup_coro)

    LOGGER.info('Starting advisor listener.')
    MAIN_LOOP.run_until_complete(run())
def main():
    """Sets up and run whole application"""
    # FIX: configure logging before the first LOGGER call — every other
    # entrypoint in this file calls init_logging() first; logging the
    # prometheus port beforehand went through unconfigured handlers.
    init_logging()
    # Set up endpoint for prometheus monitoring
    LOGGER.info("Opening port [%s] for prometheus", PROMETHEUS_PORT)
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT), LOGGER, MAIN_LOOP)
    MAIN_LOOP.run_until_complete(status_site.start())
    MAIN_LOOP.run_until_complete(a_ensure_minimal_schema_version())
    LOGGER.info("Using BOOTSTRAP_SERVERS: %s", CFG.bootstrap_servers)
    LOGGER.info("Using GROUP_ID: %s", CFG.group_id)
    LOGGER.info("Using TOPICS: %s", ", ".join(CFG.evaluator_topics))
    MAIN_LOOP.run_until_complete(run())