Example #1
def main():
    args = vars(parse_args())
    debug = args["debug"]
    parser_config(args["zone_id"], args["config"])
    init_logging(debug)
    r = Replicator()
    r.start()
Example #2
 def test_init_logging_no_cw(self, caplog):
     """test_init_logging_no_cw"""
     with caplog.at_level(logging.INFO):
         init_logging(num_servers=4)
     assert caplog.records[0].msg == "CloudWatch logging disabled due to missing access key"
     caplog.clear()
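
The three CloudWatch tests in this listing (this one, Example #15 and Example #30) pin down what init_logging is expected to log. A minimal sketch consistent with those messages, assuming credentials arrive via the CW_AWS_* environment variables; the actual CloudWatch handler wiring is not shown in the source, so it is stubbed out here:

import logging
import os

def init_logging(num_servers=1):  # num_servers kept to match the test calls; unused in this sketch
    """Enable stream logging; attach CloudWatch only when AWS credentials are set."""
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    key_id = os.getenv("CW_AWS_ACCESS_KEY_ID")
    secret = os.getenv("CW_AWS_SECRET_ACCESS_KEY")
    if not (key_id and secret):
        logger.info("CloudWatch logging disabled due to missing access key")
        return
    try:
        # Placeholder for the real CloudWatch handler setup (e.g. a watchtower
        # handler); the source does not show it.
        raise NotImplementedError("CloudWatch handler construction goes here")
    except Exception as err:  # pylint: disable=broad-except
        logger.info("Unable to enable CloudWatch logging: %s", err)
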
Example #3
def main():
    """Sets up and run whole application"""
    # Set up endpoint for prometheus monitoring
    init_logging()
    LOGGER.info("Opening port [%s] for prometheus", prometheus_port)
    start_http_server(int(prometheus_port))
    run_evaluator()
Example #4
def main(filename):
    """ Main loop."""
    init_logging()
    init_db()
    db_instance = DatabaseHandler.get_connection()
    data = JsonPkgTree(db_instance, filename)
    data.dump()
Example #5
def main():
    args = vars(parse_args())
    debug = args["debug"]
    parser_config(args["zone_id"], args["config"])
    init_logging(debug)
    r = Apply()
    asyncio.run(r.start())
Example #6
    def run_task(*args, **kwargs):
        """Function to import all repositories from input list to the DB."""
        try:
            products = kwargs.get("products", None)
            repos = kwargs.get("repos", None)
            init_logging()
            init_db()

            if products:
                product_store = ProductStore()
                product_store.store(products)

            if repos:
                repository_controller = RepositoryController()
                # Sync repos from input
                for repo_url, content_set, basearch, releasever, cert_name, ca_cert, cert, key in repos:
                    repository_controller.add_repository(repo_url,
                                                         content_set,
                                                         basearch,
                                                         releasever,
                                                         cert_name=cert_name,
                                                         ca_cert=ca_cert,
                                                         cert=cert,
                                                         key=key)
                repository_controller.import_repositories()
        except Exception as err:  # pylint: disable=broad-except
            msg = "Internal server error <%s>" % err.__hash__()
            LOGGER.exception(msg)
            DatabaseHandler.rollback()
            return "ERROR"
        return "OK"
Example #7
def main():  # pylint: disable=too-many-statements
    """Main kafka listener entrypoint."""
    init_logging()

    loop = asyncio.get_event_loop()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT),
                                          LOGGER, loop)
    loop.run_until_complete(status_site.start())

    loop.run_until_complete(a_ensure_minimal_schema_version())
    LOGGER.info("Starting upload listener.")

    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))

    ListenerCtx.set_listener_ctx()

    with DatabasePool(WORKER_THREADS):
        # prepare repo name to id cache
        db_init_repo_cache()
        LISTENER_QUEUE.listen(process_message)

        # wait until loop is stopped from terminate callback
        loop.run_forever()

        LOGGER.info("Shutting down.")
        ListenerCtx.executor.shutdown()
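
Several listeners in this listing register terminate(sig, loop) as a signal callback and then block in loop.run_forever() until it fires. The coroutine itself never appears here; a plausible minimal sketch (an assumption, not the project's actual code) cancels outstanding tasks and stops the loop so run_forever() returns:

import asyncio
import logging

LOGGER = logging.getLogger(__name__)

async def terminate(sig, loop):
    """Cancel pending tasks and stop the event loop."""
    LOGGER.info("Received signal %s, shutting down.", sig.name)
    tasks = [task for task in asyncio.all_tasks(loop)
             if task is not asyncio.current_task()]
    for task in tasks:
        task.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)
    loop.stop()
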
Example #8
def main():
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    init_db()
    LOGGER.info("Starting upload listener.")
    # get DB connection
    conn = DatabaseHandler.get_connection()

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.info('Received message from topic %s: %s', msg.topic, msg.value)

        try:
            msg_dict = json.loads(msg.value.decode("utf8"))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception("Unable to parse message: ")
            return

        if msg.topic == mqueue.UPLOAD_TOPIC:
            process_func = process_upload
        elif msg.topic == mqueue.EVENTS_TOPIC:
            if msg_dict['type'] == 'delete':
                process_func = process_delete
            else:
                UNKNOWN_EVENT_TYPE.inc()
                LOGGER.error("Received unknown event type: %s",
                             msg_dict['type'])
                return
        else:
            UNKNOWN_TOPIC.inc()
            LOGGER.error("Received message on unsupported topic: %s",
                         msg.topic)
            return

        if 'id' not in msg_dict or msg_dict["id"] is None:
            MISSING_ID.inc()
            LOGGER.warning(
                "Unable to process message, inventory ID is missing.")
            return

        future = executor.submit(process_func, msg_dict, conn, loop=loop)
        future.add_done_callback(on_thread_done)

    LISTENER_QUEUE.listen(process_message)

    # wait until loop is stopped from terminate callback
    loop.run_forever()

    LOGGER.info("Shutting down.")
    executor.shutdown()
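
The worker futures above are chained to on_thread_done, which the listing never shows. A minimal sketch, assuming its job is the usual one of surfacing exceptions from worker threads (a done callback otherwise swallows them silently):

import logging

LOGGER = logging.getLogger(__name__)

def on_thread_done(future):
    """Log any exception raised inside the worker thread."""
    try:
        future.result()
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception("Worker future failed:")
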
Example #9
def main():
    """Creates scheduler, fills it up with tasks and runs it"""
    init_logging()

    LOGGER.info("Opening port [%s] for prometheus", PROMETHEUS_PORT)

    loop = asyncio.get_event_loop()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT), LOGGER, loop)
    loop.run_until_complete(status_site.start())

    loop.run_until_complete(a_ensure_minimal_schema_version())

    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        signal.signal(sig, terminate)

    for job_info in JOBS:
        job_name, interval = job_info.split(':')
        job = import_job(job_name)
        if job:
            run_on_startup = job_name in JOBS_STARTUP
            if run_on_startup:
                next_run_time = datetime.now()
            else:
                next_run_time = undefined  # APScheduler sentinel: let the trigger schedule the first run
            LOGGER.info('Adding job: %s, cadence each %s minutes, run on startup: %s', job_name, interval, run_on_startup)
            SCHEDULER.add_job(job.run, IntervalTrigger(minutes=int(interval)), id=job_name, next_run_time=next_run_time)
        else:
            LOGGER.error('Couldn\'t find job data for job: %s', job_name)
    SCHEDULER.start()
    MAIN_LOOP.start()
    LOGGER.info("Stopped.")
Example #10
def main():
    """Main VMaaS listener entrypoint."""
    init_logging()

    loop = asyncio.get_event_loop()
    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(
        status_app,
        int(CFG.prometheus_port or CFG.vmaas_sync_prometheus_port),
        LOGGER,
        loop,
    )
    loop.run_until_complete(status_site.start())

    loop.run_until_complete(a_ensure_minimal_schema_version())

    LOGGER.info("Starting VMaaS sync service.")
    with DatabasePool(1):
        app_cont = VmaasSyncContext()

        def terminate(*_):
            """Trigger shutdown."""
            LOGGER.info("Signal received, stopping application.")
            loop.add_callback_from_signal(app_cont.app.shutdown)

        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            signal.signal(sig, terminate)

        web.run_app(app_cont.app, port=CFG.private_port)

    LOGGER.info("Shutting down.")
Example #11
def main(filename):
    """ Main loop."""
    init_logging()
    init_db()
    db_instance = DatabaseHandler.get_connection()
    #data = DataDump(db.cursor(), filename)
    data = DataDump(db_instance, filename)
    data.dump()
Example #12
def main():
    """Main platform mock entrypoint."""
    init_logging()
    if not os.path.exists(STORAGE_PATH):
        os.makedirs(STORAGE_PATH)
    LOGGER.info("Starting platform mock.")
    app = ServerApplication()
    app.listen(8000)
    app.start()
Example #13
def main():
    """Main VMaaS listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    init_db()
    LOGGER.info("Starting VMaaS sync service.")
    app = ServerApplication()
    app.listen(8000)
    app.start()
Example #14
def main():
    """Sets up and run whole application"""
    # Set up endpoint for prometheus monitoring
    init_logging()
    LOGGER.info("Using BOOTSTRAP_SERVERS: %s", mqueue.BOOTSTRAP_SERVERS)
    LOGGER.info("Using GROUP_ID: %s", mqueue.GROUP_ID)
    LOGGER.info("Using TOPICS: %s", ", ".join(kafka_evaluator_topic))
    LOGGER.info("Opening port [%s] for prometheus", prometheus_port)
    start_http_server(int(prometheus_port))
    MAIN_LOOP.run_until_complete(run())
Example #15
 def test_init_logging_cw_err(self, caplog):
     """test_init_logging_cw_err"""
     os.environ["CW_AWS_ACCESS_KEY_ID"] = "cw-aws-access-key-id"
     os.environ["CW_AWS_SECRET_ACCESS_KEY"] = "cw-aws-secret-access-key"
     with caplog.at_level(logging.INFO):
         init_logging(num_servers=4)
     assert caplog.records[0].msg.startswith(
         "Unable to enable CloudWatch logging:")
     caplog.clear()
     del os.environ["CW_AWS_ACCESS_KEY_ID"]
     del os.environ["CW_AWS_SECRET_ACCESS_KEY"]
Example #16
 def run_task(*args, **kwargs):
     """Function to start syncing all CVEs."""
     try:
         init_logging()
         init_db()
         controller = CvemapController()
         controller.store()
     except Exception as err:  # pylint: disable=broad-except
         msg = "Internal server error <%s>" % err.__hash__()
         LOGGER.exception(msg)
         DatabaseHandler.rollback()
         return "ERROR"
     return "OK"
Example #17
def main():
    """Main entrypoint."""
    init_logging()
    LOGGER.info("Starting (version %s).", VMAAS_VERSION)
    sync_interval = int(os.getenv('REPOSCAN_SYNC_INTERVAL_MINUTES',
                                  "720")) * 60000
    if sync_interval > 0:
        PeriodicCallback(periodic_sync, sync_interval).start()
    else:
        LOGGER.info("Periodic syncing disabled.")
    app = ReposcanApplication()
    app.listen(8081)
    IOLoop.instance().start()
Example #18
 def run_task(*args, **kwargs):
     """Function to start deleting repos."""
     try:
         repo = kwargs.get("repo", None)
         init_logging()
         init_db()
         repository_controller = RepositoryController()
         repository_controller.delete_content_set(repo)
     except Exception as err:  # pylint: disable=broad-except
         msg = "Internal server error <%s>" % err.__hash__()
         LOGGER.exception(msg)
         DatabaseHandler.rollback()
         return "ERROR"
     return "OK"
Example #19
 def run_task(*args, **kwargs):
     """Function to start syncing all repositories available from database."""
     try:
         init_logging()
         init_db()
         repository_controller = RepositoryController()
         repository_controller.add_db_repositories()
         repository_controller.store()
     except Exception as err:  # pylint: disable=broad-except
         msg = "Internal server error <%s>" % err.__hash__()
         LOGGER.exception(msg)
         DatabaseHandler.rollback()
         return "ERROR"
     return "OK"
Example #20
def main():
    """Application entrypoint"""
    init_logging()

    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT),
                                          LOGGER, MAIN_LOOP)
    MAIN_LOOP.run_until_complete(status_site.start())
    MAIN_LOOP.run_until_complete(a_ensure_minimal_schema_version())

    MAIN_LOOP.run_until_complete(setup_db_pool())
    MAIN_LOOP.run_until_complete(db_init_caches())
    LOGGER.info('Starting advisor listener.')
    MAIN_LOOP.run_until_complete(run())
Example #21
def main():
    """Creates schleduler, fills it up with tasks and runs it"""
    init_logging()
    scheduler = BlockingScheduler()

    for job_info in JOBS:
        job_name, interval = job_info.split(':')
        job = import_job(job_name)
        if job:
            LOGGER.info('Adding job: %s, cadence each %s minutes', job_name, interval)
            scheduler.add_job(job.run, IntervalTrigger(minutes=int(interval)))
        else:
            LOGGER.error('Couldn\'t find job data for job: %s', job_name)
    scheduler.start()
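
import_job is called here and in Examples #9 and #27 but never defined in the listing. A minimal sketch, assuming each job is a module in a hypothetical jobs package (the real package path is not shown in the source):

import importlib

def import_job(job_name):
    """Resolve a job module by name, or return None so callers can log 'not found'."""
    try:
        return importlib.import_module("jobs." + job_name)  # package path is a guess
    except ImportError:
        return None
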
Example #22
def create_app():
    """Create reposcan app."""
    init_logging()
    LOGGER.info("Starting (version %s).", VMAAS_VERSION)
    sync_interval = int(os.getenv('REPOSCAN_SYNC_INTERVAL_MINUTES',
                                  "360")) * 60000
    if sync_interval > 0:
        PeriodicCallback(periodic_sync, sync_interval).start()
    else:
        LOGGER.info("Periodic syncing disabled.")
    app = ReposcanApplication()
    app.listen(8081)

    app.websocket_reconnect()
    app.reconnect_callback = PeriodicCallback(
        app.websocket_reconnect, WEBSOCKET_RECONNECT_INTERVAL * 1000)
    app.reconnect_callback.start()
Example #23
def main():
    """Sets up and run whole application"""
    # Set up endpoint for prometheus monitoring
    LOGGER.info("Opening port [%s] for prometheus", PROMETHEUS_PORT)
    init_logging()

    status_app = create_status_app(LOGGER)
    _, status_site = create_status_runner(status_app, int(PROMETHEUS_PORT),
                                          LOGGER, MAIN_LOOP)
    MAIN_LOOP.run_until_complete(status_site.start())

    MAIN_LOOP.run_until_complete(a_ensure_minimal_schema_version())

    LOGGER.info("Using BOOTSTRAP_SERVERS: %s", CFG.bootstrap_servers)
    LOGGER.info("Using GROUP_ID: %s", CFG.group_id)
    LOGGER.info("Using TOPICS: %s", ", ".join(CFG.evaluator_topics))
    MAIN_LOOP.run_until_complete(run())
Example #24
def main():
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    init_db()
    LOGGER.info("Starting upload listener.")
    # get DB connection
    conn = DatabaseHandler.get_connection()

    session = requests.Session()
    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = ThreadPoolExecutor(WORKER_THREADS)

    def process_message(msg):
        """Message processing logic"""
        PROCESS_UPLOAD.inc()
        LOGGER.info('Received message from topic %s: %s', msg.topic, msg.value)

        upload_data = json.loads(msg.value.decode("utf8"))

        # Inventory ID is missing
        if 'id' not in upload_data or upload_data["id"] is None:
            MISSING_ID.inc()
            LOGGER.warning("Unable to store system, inventory ID is missing.")
            return

        future = executor.submit(process_upload,
                                 upload_data,
                                 session,
                                 conn,
                                 loop=loop)
        future.add_done_callback(on_thread_done)

    UPLOAD_QUEUE.listen(process_message)

    # wait until loop is stopped from terminate callback
    loop.run_forever()

    LOGGER.info("Shutting down.")
    executor.shutdown()
    session.close()
Example #25
def main():
    """Main platform mock entrypoint."""
    init_logging()
    if not os.path.exists(STORAGE_PATH):
        os.makedirs(STORAGE_PATH)
    LOGGER.info("Starting platform mock.")
    app = ServerApplication()
    app.listen(8000)

    def terminate(*_):
        """Trigger shutdown."""
        LOGGER.info("Signal received, stopping application.")
        IOLoop.instance().add_callback_from_signal(app.stop)

    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        signal.signal(sig, terminate)

    app.start()
    LOGGER.info("Shutting down.")
Example #26
def main():
    """Main VMaaS listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    LOGGER.info("Starting VMaaS sync service.")
    with DatabasePool(1):
        app = ServerApplication()
        app.listen(8000)

        def terminate(*_):
            """Trigger shutdown."""
            LOGGER.info("Signal received, stopping application.")
            IOLoop.instance().add_callback_from_signal(app.stop)

        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            signal.signal(sig, terminate)

        app.start()
    LOGGER.info("Shutting down.")
Example #27
def main():
    """Creates schleduler, fills it up with tasks and runs it"""
    init_logging()

    LOGGER.info("Opening port [%s] for prometheus", PROMETHEUS_PORT)
    start_http_server(int(PROMETHEUS_PORT))

    ensure_minimal_schema_version()

    scheduler = BlockingScheduler()

    for job_info in JOBS:
        job_name, interval = job_info.split(':')
        job = import_job(job_name)
        if job:
            LOGGER.info('Adding job: %s, cadence each %s minutes', job_name,
                        interval)
            scheduler.add_job(job.run, IntervalTrigger(minutes=int(interval)))
        else:
            LOGGER.error('Couldn\'t find job data for job: %s', job_name)
    scheduler.start()
Example #28
def main():
    """Sets up and run whole application"""
    # Set up endpoint for prometheus monitoring
    init_logging()
    upgrader = DatabaseUpgrade()
    upgrader.upgrade()
Example #29
def main():  # pylint: disable=too-many-statements
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    LOGGER.info("Starting upload listener.")

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):  # pylint: disable=too-many-return-statements,too-many-branches
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.info('Received message from topic %s: %s', msg.topic, msg.value)

        try:
            msg_dict = json.loads(msg.value.decode("utf8"))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception("Unable to parse message: ")
            return
        FailedCache.process_failed_cache(FailedCache.upload_cache, executor,
                                         process_upload, loop)
        FailedCache.process_failed_cache(FailedCache.delete_cache, executor,
                                         process_delete, loop)

        if msg.topic == mqueue.UPLOAD_TOPIC:
            if not validate_msg(msg_dict, "upload",
                                REQUIRED_UPLOAD_MESSAGE_FIELDS):
                return
            # send message to payload tracker
            send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                        msg_dict,
                                        'received',
                                        loop=loop)
            # process only archives from smart_management accounts
            identity = get_identity(
                msg_dict["platform_metadata"]["b64_identity"])
            if identity is None:
                INVALID_IDENTITY.inc()
                error_msg = "Skipped upload due to invalid identity header."
                LOGGER.warning(error_msg)
                send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                            msg_dict,
                                            'error',
                                            status_msg=error_msg,
                                            loop=loop)
                return
            if not is_entitled_smart_management(identity,
                                                allow_missing_section=True):
                MISSING_SMART_MANAGEMENT.inc()
                error_msg = "Skipped upload due to missing smart_management entitlement."
                LOGGER.debug(error_msg)
                send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                            msg_dict,
                                            'error',
                                            status_msg=error_msg,
                                            loop=loop)
                return
            process_func = process_upload
        elif msg.topic == mqueue.EVENTS_TOPIC:
            if not validate_msg(msg_dict, "event",
                                REQUIRED_EVENT_MESSAGE_FIELDS):
                return
            if msg_dict['type'] == 'delete':
                process_func = process_delete
            else:
                UNKNOWN_EVENT_TYPE.inc()
                LOGGER.error("Received unknown event type: %s",
                             msg_dict['type'])
                return
        else:
            UNKNOWN_TOPIC.inc()
            LOGGER.error("Received message on unsupported topic: %s",
                         msg.topic)
            return

        future = executor.submit(process_func, msg_dict, loop=loop)
        future.add_done_callback(on_thread_done)

    with DatabasePool(WORKER_THREADS):
        # prepare repo name to id cache
        db_init_repo_cache()
        LISTENER_QUEUE.listen(process_message)

        # wait until loop is stopped from terminate callback
        loop.run_forever()

        LOGGER.info("Shutting down.")
        executor.shutdown()
Example #30
 def test_cw_disabled(self, caplog):
     """tests disabled cloudwatch logging"""
     with caplog.at_level(logging.INFO):
         init_logging(num_servers=4)
     assert caplog.records[0].msg == "CloudWatch logging disabled"
     caplog.clear()