Example #1
0
def _worker(inq, worker_factory, scheduler, proc_name):
    """Scheduler's worker process main function.

    Pulls (target, message) pairs off *inq* and feeds them to a worker
    built by *worker_factory*.  A ``None`` item on the queue is the
    shutdown sentinel: it is still delivered to the worker (as
    ``(None, None)``) so the worker can clean up, then the loop exits.

    :param inq: multiprocessing queue delivering ``(target, message)``
        tuples, or ``None`` as the shutdown sentinel
    :param worker_factory: callable producing the worker instance
    :param scheduler: scheduler that owns this worker process
    :param proc_name: name for this worker process (passed to the factory)
    """
    daemon.ignore_signals()
    LOG.debug('starting worker process')
    worker = worker_factory(scheduler=scheduler, proc_name=proc_name)
    while True:
        try:
            data = inq.get()
        except IOError:
            # NOTE(dhellmann): Likely caused by a signal arriving
            # during processing, especially SIGCHLD.
            data = None
        if data is None:
            target, message = None, None
        else:
            target, message = data
        try:
            worker.handle_message(target, message)
        except Exception:
            # six.text_type replaces the py2-only unicode() builtin so
            # this also works on Python 3 (and matches usage elsewhere
            # in the project).
            LOG.exception(_LE('Error processing data %s'),
                          six.text_type(data))
        if data is None:
            break
    LOG.debug('exiting')
Example #2
0
def _worker(inq, worker_factory, scheduler, proc_name):
    """Scheduler's worker process main function.

    Consumes items from *inq* and dispatches each to a worker created
    via *worker_factory*.  A ``None`` item acts as the shutdown
    sentinel: it is handed to the worker as ``(None, None)`` before the
    loop terminates.
    """
    daemon.ignore_signals()
    LOG.debug('starting worker process')
    worker = worker_factory(scheduler=scheduler, proc_name=proc_name)
    shutting_down = False
    while not shutting_down:
        try:
            payload = inq.get()
        except IOError:
            # NOTE(dhellmann): Likely caused by a signal arriving
            # during processing, especially SIGCHLD.
            payload = None
        # A None payload means shutdown; finish this iteration so the
        # worker still sees the sentinel, then fall out of the loop.
        shutting_down = payload is None
        target, message = (None, None) if shutting_down else payload
        try:
            worker.handle_message(target, message)
        except Exception:
            LOG.exception(_LE('Error processing data %s'),
                          six.text_type(payload))
    LOG.debug('exiting')
Example #3
0
def main(argv=sys.argv[1:]):
    """Main entry point into the astara-orchestrator.

    On invocation of this method, logging and local network
    connectivity setup are performed.  Configuration is obtained
    through the 'ak-config' file, passed as an argument to this
    method.  Worker processes are spawned for handling various tasks
    associated with processing as well as responding to different
    Neutron events, prior to starting a notification dispatch loop.

    :param argv: list of command line arguments

    :returns: None

    :raises: None

    """
    # TODO(rama) Error Handling to be added as part of the docstring
    # description

    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = 'pmain'
    t = threading.current_thread()
    t.name = 'tmain'
    ak_cfg.parse_config(argv)
    log.setup(cfg.CONF, 'astara-orchestrator')
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    neutron = neutron_api.Neutron(cfg.CONF)

    # TODO(mark): develop better way restore after machine reboot
    # neutron.purge_management_interface()

    # bring the mgt tap interface up; the port address is returned in
    # CIDR form, so split off the prefix length to keep the bare IP.
    mgt_ip_address = neutron.ensure_local_service_port().split('/')[0]

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing: the (None, None) pair is a
    # sentinel the downstream dispatch loop treats as shutdown.
    def _stop_processing(*args):
        notification_queue.put((None, None))

    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={'notification_queue': notification_queue},
        name='notification-listener',
    )
    notification_proc.start()

    # Cluster coordination is optional; when disabled no coordinator
    # process is started and it is skipped at shutdown below.
    if CONF.coordination.enabled:
        coordinator_proc = multiprocessing.Process(
            target=coordination.start,
            kwargs={'notification_queue': notification_queue},
            name='coordinator',
        )
        coordinator_proc.start()
    else:
        coordinator_proc = None

    # Serve the metadata proxy on the management IP address.
    metadata_proc = multiprocessing.Process(target=metadata.serve,
                                            args=(mgt_ip_address, ),
                                            name='metadata-proxy')
    metadata_proc.start()

    # NOTE(review): imported here rather than at module top,
    # presumably to avoid an import cycle -- confirm before moving.
    from astara.api import rug as rug_api
    rug_api_proc = multiprocessing.Process(target=rug_api.serve,
                                           name='rug-api')
    rug_api_proc.start()

    # Set up the notifications publisher; a no-op publisher is used
    # when ceilometer integration is disabled.
    Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled else
                 notifications.NoopPublisher)
    publisher = Publisher(topic=cfg.CONF.ceilometer.topic, )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(
        worker.Worker,
        notifier=publisher,
        management_address=mgt_ip_address,
    )

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(worker_factory=worker_factory, )

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        # Shutdown path: stop scheduler and publisher first, then
        # terminate every child process that was actually started.
        LOG.info(_LI('Stopping scheduler.'))
        sched.stop()
        LOG.info(_LI('Stopping notification publisher.'))
        publisher.stop()

        # Terminate the subprocesses
        for subproc in [
                notification_proc, coordinator_proc, metadata_proc,
                rug_api_proc
        ]:
            # coordinator_proc is None when coordination is disabled.
            if not subproc:
                continue
            LOG.info(_LI('Stopping %s.'), subproc.name)
            subproc.terminate()
Example #4
0
def main(argv=sys.argv[1:]):
    """Main entry point into the astara-orchestrator.

    On invocation of this method, logging and local network
    connectivity setup are performed.  Configuration is obtained
    through the 'ak-config' file, passed as an argument to this
    method.  Worker processes are spawned for handling various tasks
    associated with processing as well as responding to different
    Neutron events, prior to starting a notification dispatch loop.

    :param argv: list of command line arguments

    :returns: None

    :raises: None

    """
    # TODO(rama) Error Handling to be added as part of the docstring
    # description

    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = 'pmain'
    t = threading.current_thread()
    t.name = 'tmain'
    ak_cfg.parse_config(argv)
    log.setup(cfg.CONF, 'astara-orchestrator')
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    neutron = neutron_api.Neutron(cfg.CONF)

    # TODO(mark): develop better way restore after machine reboot
    # neutron.purge_management_interface()

    # bring the mgt tap interface up; the port address is returned in
    # CIDR form, so split off the prefix length to keep the bare IP.
    mgt_ip_address = neutron.ensure_local_service_port().split('/')[0]

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing: the (None, None) pair is a
    # sentinel the downstream dispatch loop treats as shutdown.
    def _stop_processing(*args):
        notification_queue.put((None, None))
    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={
            'notification_queue': notification_queue
        },
        name='notification-listener',
    )
    notification_proc.start()

    # Cluster coordination is optional; when disabled no coordinator
    # process is started and it is skipped at shutdown below.
    if CONF.coordination.enabled:
        coordinator_proc = multiprocessing.Process(
            target=coordination.start,
            kwargs={
                'notification_queue': notification_queue
            },
            name='coordinator',
        )
        coordinator_proc.start()
    else:
        coordinator_proc = None

    # Serve the metadata proxy on the management IP address.
    metadata_proc = multiprocessing.Process(
        target=metadata.serve,
        args=(mgt_ip_address,),
        name='metadata-proxy'
    )
    metadata_proc.start()

    # NOTE(review): imported here rather than at module top,
    # presumably to avoid an import cycle -- confirm before moving.
    from astara.api import rug as rug_api
    rug_api_proc = multiprocessing.Process(
        target=rug_api.serve,
        name='rug-api'
    )
    rug_api_proc.start()

    # Set up the notifications publisher; a no-op publisher is used
    # when ceilometer integration is disabled.
    Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled
                 else notifications.NoopPublisher)
    publisher = Publisher(
        topic=cfg.CONF.ceilometer.topic,
    )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(
        worker.Worker,
        notifier=publisher,
        management_address=mgt_ip_address,
    )

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(
        worker_factory=worker_factory,
    )

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        # Shutdown path: stop scheduler and publisher first, then
        # terminate every child process that was actually started.
        LOG.info(_LI('Stopping scheduler.'))
        sched.stop()
        LOG.info(_LI('Stopping notification publisher.'))
        publisher.stop()

        # Terminate the subprocesses
        for subproc in [notification_proc, coordinator_proc, metadata_proc,
                        rug_api_proc]:
            # coordinator_proc is None when coordination is disabled.
            if not subproc:
                continue
            LOG.info(_LI('Stopping %s.'), subproc.name)
            subproc.terminate()