Example #1
def main():
    # Patch the standard library so blocking calls cooperate with eventlet.
    eventlet.monkey_patch()
    # Parse CLI arguments and configuration, then set up logging.
    cfg.CONF(sys.argv[1:], project='akanda-rug')
    log.setup('akanda')

    # Wrap the L3 manager in a periodic service on the L3 agent topic,
    # then launch it and block until it exits.
    mgr = manager.AkandaL3Manager()
    svc = PeriodicService(
        host=cfg.CONF.host, topic=L3_AGENT_TOPIC, manager=mgr
    )
    service.launch(svc).wait()
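
PeriodicService itself is not defined in this excerpt. As a rough illustration of the pattern it implies (wrap a manager object and invoke it on a fixed interval until the service is stopped), here is a stand-alone sketch using only the standard library; the class name SimplePeriodicService and the manager method periodic_sync() are hypothetical and are not part of akanda-rug.

import threading


class SimplePeriodicService(object):
    """Illustrative only: call a manager's periodic task until stopped."""

    def __init__(self, manager, interval=60):
        self.manager = manager
        self.interval = interval
        self._stop = threading.Event()

    def start(self):
        # Run the manager's periodic task, then wait for the interval
        # (waking early if stop() is called).
        while not self._stop.is_set():
            self.manager.periodic_sync()  # assumed method name
            self._stop.wait(self.interval)

    def stop(self):
        self._stop.set()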
Example #2
def main(argv=sys.argv[1:]):
    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = "pmain"
    t = threading.current_thread()
    t.name = "tmain"

    register_and_load_opts()
    cfg.CONF(argv, project="akanda-rug")

    log.setup("akanda-rug")
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    # Purge the mgt tap interface on startup
    quantum = quantum_api.Quantum(cfg.CONF)
    # TODO(mark): develop a better way to restore after machine reboot
    # quantum.purge_management_interface()

    # bring the mgt tap interface up
    quantum.ensure_local_service_port()

    # bring up the external port if configured
    if cfg.CONF.plug_external_port:
        quantum.ensure_local_external_port()

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing.
    def _stop_processing(*args):
        notification_queue.put((None, None))

    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={
            "host_id": cfg.CONF.host,
            "amqp_url": cfg.CONF.amqp_url,
            "notifications_exchange_name": cfg.CONF.incoming_notifications_exchange,
            "rpc_exchange_name": cfg.CONF.rpc_exchange,
            "notification_queue": notification_queue,
        },
        name="notification-listener",
    )
    notification_proc.start()

    mgt_ip_address = quantum_api.get_local_service_ip(cfg.CONF).split("/")[0]
    metadata_proc = multiprocessing.Process(target=metadata.serve, args=(mgt_ip_address,), name="metadata-proxy")
    metadata_proc.start()

    # Set up the notifications publisher
    Publisher = notifications.Publisher if cfg.CONF.ceilometer.enabled else notifications.NoopPublisher
    publisher = Publisher(
        cfg.CONF.amqp_url, exchange_name=cfg.CONF.outgoing_notifications_exchange, topic=cfg.CONF.ceilometer.topic
    )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(
        worker.Worker,
        num_threads=cfg.CONF.num_worker_threads,
        notifier=publisher,
        ignore_directory=cfg.CONF.ignored_router_directory,
        queue_warning_threshold=cfg.CONF.queue_warning_threshold,
        reboot_error_threshold=cfg.CONF.reboot_error_threshold,
    )

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(num_workers=cfg.CONF.num_worker_processes, worker_factory=worker_factory)

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        # Terminate the scheduler and its workers
        LOG.info("stopping processing")
        sched.stop()
        # Terminate the listening process
        LOG.debug("stopping %s", notification_proc.name)
        notification_proc.terminate()
        LOG.debug("stopping %s", metadata_proc.name)
        metadata_proc.terminate()
        LOG.info("exiting")
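
Both examples expose a plain main() callable, so they would typically be wired up as console scripts by the project's packaging. A hypothetical setup.py fragment is sketched below; the script names and module paths are placeholders, not the project's real entry points.

from setuptools import setup

setup(
    name='akanda-rug',
    # other metadata elided
    entry_points={
        'console_scripts': [
            # Placeholder paths for illustration only.
            'akanda-l3-agent = akanda.rug.l3_agent:main',
            'akanda-rug-service = akanda.rug.main:main',
        ],
    },
)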