def debug_one_router(args=None):
    """Run a single router's state machine interactively under pdb.

    Parses CLI/config options (including the extra ``router-id`` option),
    builds a worker context and router driver, constructs the state
    Automaton for that router, queues a fake ``update`` message, and then
    drops into pdb before driving one update cycle.

    :param args: list of command-line arguments; defaults to
        ``sys.argv[1:]`` at call time (not import time).
    :returns: None
    """
    # Resolve the default lazily so sys.argv is read when the function is
    # actually invoked, not when the module is imported.
    if args is None:
        args = sys.argv[1:]

    # Add our extra option for specifying the router-id to debug
    cfg.CONF.register_cli_opts(DEBUG_OPTS)
    # Long timeout so a human stepping through pdb does not trip the
    # boot watchdog.
    cfg.CONF.set_override('boot_timeout', 60000)
    cfg.CONF.import_opt('host', 'astara.main')
    config.parse_config(args)
    logging.setup(cfg.CONF, __name__)
    log = logging.getLogger(__name__)
    log.debug('Proxy settings: %r', os.getenv('no_proxy'))

    context = worker.WorkerContext()
    driver = drivers.get('router')(context, cfg.CONF.router_id)
    a = state.Automaton(
        resource=driver,
        tenant_id=driver._router.tenant_id,
        delete_callback=delete_callback,
        bandwidth_callback=bandwidth_callback,
        worker_context=context,
        queue_warning_threshold=100,
        reboot_error_threshold=1,
    )
    a.send_message(Fake('update'))
    import pdb
    pdb.set_trace()
    a.update(context)
def debug_one_router(args=None):
    """Run a single router's state machine interactively under pdb.

    Parses CLI/config options (including the extra ``router-id`` option),
    builds a worker context and router driver, constructs the state
    Automaton for that router, queues a fake ``update`` message, and then
    drops into pdb before driving one update cycle.

    :param args: list of command-line arguments; defaults to
        ``sys.argv[1:]`` at call time (not import time).
    :returns: None
    """
    # Resolve the default lazily so sys.argv is read when the function is
    # actually invoked, not when the module is imported.
    if args is None:
        args = sys.argv[1:]

    # Add our extra option for specifying the router-id to debug
    cfg.CONF.register_cli_opts(DEBUG_OPTS)
    # Long timeout so a human stepping through pdb does not trip the
    # boot watchdog.
    cfg.CONF.set_override("boot_timeout", 60000)
    cfg.CONF.import_opt("host", "astara.main")
    config.parse_config(args)
    logging.setup(cfg.CONF, __name__)
    log = logging.getLogger(__name__)
    log.debug("Proxy settings: %r", os.getenv("no_proxy"))

    context = worker.WorkerContext()
    driver = drivers.get("router")(context, cfg.CONF.router_id)
    a = state.Automaton(
        driver=driver,
        resource_id=cfg.CONF.router_id,
        tenant_id=driver._router.tenant_id,
        delete_callback=delete_callback,
        bandwidth_callback=bandwidth_callback,
        worker_context=context,
        queue_warning_threshold=100,
        reboot_error_threshold=1,
    )
    a.send_message(Fake("update"))
    import pdb
    pdb.set_trace()
    a.update(context)
def main(argv=None):
    """Entry point for the astara-pez service.

    Parses configuration, sets up logging, then launches the PezService
    via the oslo service launcher and blocks until it exits.

    :param argv: list of command-line arguments; defaults to
        ``sys.argv[1:]`` at call time (not import time).
    :returns: None
    """
    # Resolve the default lazily so sys.argv is read at invocation, not
    # frozen at module import.
    if argv is None:
        argv = sys.argv[1:]

    ak_cfg.parse_config(argv)
    log.setup(CONF, 'astara-pez')
    CONF.log_opt_values(LOG, logging.INFO)

    LOG.info(_LI("Starting Astara Pez service."))

    mgr = PezService()
    launcher = service.launch(CONF, mgr)
    launcher.wait()
def main(argv=None):
    """Main Entry point into the astara-orchestrator

    This is the main entry point into the astara-orchestrator. On invocation
    of this method, logging, local network connectivity setup is performed.
    This information is obtained through the 'ak-config' file, passed as
    argument to this method. Worker threads are spawned for handling
    various tasks that are associated with processing as well as
    responding to different Neutron events prior to starting a notification
    dispatch loop.

    :param argv: list of Command line arguments; defaults to
        ``sys.argv[1:]`` at call time (not import time).

    :returns: None

    :raises: None

    """
    # TODO(rama) Error Handling to be added as part of the docstring
    # description

    # Resolve the default lazily so sys.argv is read when main() runs,
    # not frozen at module import.
    if argv is None:
        argv = sys.argv[1:]

    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = 'pmain'
    t = threading.current_thread()
    t.name = 'tmain'

    ak_cfg.parse_config(argv)
    log.setup(cfg.CONF, 'astara-orchestrator')
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    neutron = neutron_api.Neutron(cfg.CONF)

    # TODO(mark): develop better way restore after machine reboot
    # neutron.purge_management_interface()

    # bring the mgt tap interface up
    mgt_ip_address = neutron.ensure_local_service_port().split('/')[0]

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing.
    def _stop_processing(*args):
        notification_queue.put((None, None))
    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={'notification_queue': notification_queue},
        name='notification-listener',
    )
    notification_proc.start()

    if CONF.coordination.enabled:
        coordinator_proc = multiprocessing.Process(
            target=coordination.start,
            kwargs={'notification_queue': notification_queue},
            name='coordinator',
        )
        coordinator_proc.start()
    else:
        coordinator_proc = None

    metadata_proc = multiprocessing.Process(
        target=metadata.serve,
        args=(mgt_ip_address, ),
        name='metadata-proxy'
    )
    metadata_proc.start()

    from astara.api import rug as rug_api
    rug_api_proc = multiprocessing.Process(
        target=rug_api.serve,
        name='rug-api'
    )
    rug_api_proc.start()

    # Set up the notifications publisher
    Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled
                 else notifications.NoopPublisher)
    publisher = Publisher(
        topic=cfg.CONF.ceilometer.topic,
    )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(
        worker.Worker,
        notifier=publisher,
        management_address=mgt_ip_address,
    )

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(
        worker_factory=worker_factory,
    )

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        LOG.info(_LI('Stopping scheduler.'))
        sched.stop()

        LOG.info(_LI('Stopping notification publisher.'))
        publisher.stop()

        # Terminate the subprocesses
        for subproc in [notification_proc, coordinator_proc, metadata_proc,
                        rug_api_proc]:
            if not subproc:
                continue
            LOG.info(_LI('Stopping %s.'), subproc.name)
            subproc.terminate()
def main(argv=None):
    """Main Entry point into the astara-orchestrator

    This is the main entry point into the astara-orchestrator. On invocation
    of this method, logging, local network connectivity setup is performed.
    This information is obtained through the 'ak-config' file, passed as
    argument to this method. Worker threads are spawned for handling
    various tasks that are associated with processing as well as
    responding to different Neutron events prior to starting a notification
    dispatch loop.

    :param argv: list of Command line arguments; defaults to
        ``sys.argv[1:]`` at call time (not import time).

    :returns: None

    :raises: None

    """
    # TODO(rama) Error Handling to be added as part of the docstring
    # description

    # Resolve the default lazily so sys.argv is read when main() runs,
    # not frozen at module import.
    if argv is None:
        argv = sys.argv[1:]

    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = 'pmain'
    t = threading.current_thread()
    t.name = 'tmain'

    ak_cfg.parse_config(argv)
    log.setup(cfg.CONF, 'astara-orchestrator')
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    neutron = neutron_api.Neutron(cfg.CONF)

    # TODO(mark): develop better way restore after machine reboot
    # neutron.purge_management_interface()

    # bring the mgt tap interface up
    mgt_ip_address = neutron.ensure_local_service_port().split('/')[0]

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing.
    def _stop_processing(*args):
        notification_queue.put((None, None))
    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={
            'notification_queue': notification_queue
        },
        name='notification-listener',
    )
    notification_proc.start()

    if CONF.coordination.enabled:
        coordinator_proc = multiprocessing.Process(
            target=coordination.start,
            kwargs={
                'notification_queue': notification_queue
            },
            name='coordinator',
        )
        coordinator_proc.start()
    else:
        coordinator_proc = None

    metadata_proc = multiprocessing.Process(
        target=metadata.serve,
        args=(mgt_ip_address,),
        name='metadata-proxy'
    )
    metadata_proc.start()

    from astara.api import rug as rug_api
    rug_api_proc = multiprocessing.Process(
        target=rug_api.serve,
        name='rug-api'
    )
    rug_api_proc.start()

    # Set up the notifications publisher
    Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled
                 else notifications.NoopPublisher)
    publisher = Publisher(
        topic=cfg.CONF.ceilometer.topic,
    )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(
        worker.Worker,
        notifier=publisher,
        management_address=mgt_ip_address,
    )

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(
        worker_factory=worker_factory,
    )

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        LOG.info(_LI('Stopping scheduler.'))
        sched.stop()

        LOG.info(_LI('Stopping notification publisher.'))
        publisher.stop()

        # Terminate the subprocesses
        for subproc in [notification_proc, coordinator_proc, metadata_proc,
                        rug_api_proc]:
            if not subproc:
                continue
            LOG.info(_LI('Stopping %s.'), subproc.name)
            subproc.terminate()