def debug_one_router(args=None):
    """Run the state machine for a single router under the debugger.

    Parses the CLI/config (including the extra ``router_id`` debug option),
    builds a worker context and an Automaton for that one router, queues an
    ``update`` message, and drops into ``pdb`` just before processing it.

    :param args: list of command line arguments; defaults to
        ``sys.argv[1:]`` at call time.
    :returns: None
    """
    # Evaluate the default lazily: a `args=sys.argv[1:]` default would be
    # captured once at import time, not when the function is invoked.
    if args is None:
        args = sys.argv[1:]
    # Add our extra option for specifying the router-id to debug
    cfg.CONF.register_cli_opts(DEBUG_OPTS)
    # Effectively disable the boot timeout while a human is at the debugger.
    cfg.CONF.set_override("boot_timeout", 60000)
    cfg.CONF.import_opt("host", "akanda.rug.main")
    config.parse_config(args)
    logging.basicConfig(
        level=logging.DEBUG,
        format=":".join("%(" + n + ")s"
                        for n in ["processName",
                                  "threadName",
                                  "name",
                                  "levelname",
                                  "message"]),
    )
    log = logging.getLogger(__name__)
    log.debug("Proxy settings: %r", os.getenv("no_proxy"))
    context = worker.WorkerContext()
    router_obj = context.neutron.get_router_detail(cfg.CONF.router_id)
    a = state.Automaton(
        router_id=cfg.CONF.router_id,
        tenant_id=router_obj.tenant_id,
        delete_callback=delete_callback,
        bandwidth_callback=bandwidth_callback,
        worker_context=context,
        queue_warning_threshold=100,
        reboot_error_threshold=1,
    )
    a.send_message(Fake("update"))
    # Intentional debugger breakpoint: this is a debugging entry point.
    import pdb
    pdb.set_trace()
    a.update(context)
def debug_one_router(args=None):
    """Drive one router's state machine interactively for debugging.

    Registers the debug-only CLI options, configures verbose logging,
    creates the Automaton for ``cfg.CONF.router_id``, enqueues a fake
    ``update`` message, and stops in ``pdb`` before dispatching it.

    :param args: list of command line arguments; defaults to
        ``sys.argv[1:]`` evaluated when the function runs.
    :returns: None
    """
    # A default of `sys.argv[1:]` in the signature is bound once at import
    # time; use a None sentinel so the argv snapshot is taken per call.
    if args is None:
        args = sys.argv[1:]
    # Add our extra option for specifying the router-id to debug
    cfg.CONF.register_cli_opts(DEBUG_OPTS)
    # Large timeout so the router isn't declared dead while stepping in pdb.
    cfg.CONF.set_override('boot_timeout', 60000)
    cfg.CONF.import_opt('host', 'akanda.rug.main')
    config.parse_config(args)
    logging.basicConfig(
        level=logging.DEBUG,
        format=':'.join('%(' + n + ')s'
                        for n in ['processName',
                                  'threadName',
                                  'name',
                                  'levelname',
                                  'message']),
    )
    log = logging.getLogger(__name__)
    log.debug('Proxy settings: %r', os.getenv('no_proxy'))
    context = worker.WorkerContext()
    router_obj = context.neutron.get_router_detail(cfg.CONF.router_id)
    a = state.Automaton(
        router_id=cfg.CONF.router_id,
        tenant_id=router_obj.tenant_id,
        delete_callback=delete_callback,
        bandwidth_callback=bandwidth_callback,
        worker_context=context,
        queue_warning_threshold=100,
        reboot_error_threshold=1,
    )
    a.send_message(Fake('update'))
    # Deliberate breakpoint: this function exists to debug one router.
    import pdb
    pdb.set_trace()
    a.update(context)
def main(argv=None):
    """Main entry point into the akanda-rug.

    This is the main entry point into the akanda-rug. On invocation of
    this method, logging, local network connectivity setup is performed.
    This information is obtained through the 'ak-config' file, passed as
    argument to this method. Worker threads are spawned for handling
    various tasks that are associated with processing as well as
    responding to different Neutron events prior to starting a
    notification dispatch loop.

    :param argv: list of Command line arguments; defaults to
        ``sys.argv[1:]`` at call time
    :returns: None
    :raises: None
    """
    # TODO(rama) Error Handling to be added as part of the docstring
    # description

    # NOTE: a signature default of `sys.argv[1:]` is evaluated once at
    # import time; take the snapshot per call instead.
    if argv is None:
        argv = sys.argv[1:]

    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = 'pmain'
    t = threading.current_thread()
    t.name = 'tmain'
    ak_cfg.parse_config(argv)
    log.setup(cfg.CONF, 'akanda-rug')
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    neutron = neutron_api.Neutron(cfg.CONF)

    # TODO(mark): develop better way restore after machine reboot
    # neutron.purge_management_interface()

    # bring the mgt tap interface up
    neutron.ensure_local_service_port()

    # bring the external port
    if cfg.CONF.plug_external_port:
        neutron.ensure_local_external_port()

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing. The (None, None) pair is the
    # sentinel that tells shuffle_notifications to drain and return.
    def _stop_processing(*args):
        notification_queue.put((None, None))
    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={'notification_queue': notification_queue},
        name='notification-listener',
    )
    notification_proc.start()

    # Strip the CIDR suffix; the metadata/rug-api servers bind to a bare IP.
    mgt_ip_address = neutron_api.get_local_service_ip(cfg.CONF).split('/')[0]
    metadata_proc = multiprocessing.Process(
        target=metadata.serve,
        args=(mgt_ip_address,),
        name='metadata-proxy',
    )
    metadata_proc.start()

    # Imported here rather than at module level — presumably to avoid a
    # circular import; NOTE(review): confirm before hoisting to the top.
    from akanda.rug.api import rug as rug_api
    rug_api_proc = multiprocessing.Process(
        target=rug_api.serve,
        args=(mgt_ip_address,),
        name='rug-api',
    )
    rug_api_proc.start()

    # Set up the notifications publisher
    Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled
                 else notifications.NoopPublisher)
    publisher = Publisher(
        topic=cfg.CONF.ceilometer.topic,
    )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(
        worker.Worker,
        notifier=publisher,
    )

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(
        worker_factory=worker_factory,
    )

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        # Shut down in dependency order, then terminate the subprocesses.
        LOG.info(_LI('Stopping scheduler.'))
        sched.stop()
        LOG.info(_LI('Stopping notification publisher.'))
        publisher.stop()

        # Terminate the subprocesses
        for subproc in [notification_proc, metadata_proc, rug_api_proc]:
            LOG.info(_LI('Stopping %s.'), subproc.name)
            subproc.terminate()
def main(argv=None):
    """Main entry point into the akanda-rug.

    This is the main entry point into the akanda-rug. On invocation of
    this method, logging, local network connectivity setup is performed.
    This information is obtained through the 'ak-config' file, passed as
    argument to this method. Worker threads are spawned for handling
    various tasks that are associated with processing as well as
    responding to different Neutron events prior to starting a
    notification dispatch loop.

    :param argv: list of Command line arguments; defaults to
        ``sys.argv[1:]`` at call time
    :returns: None
    :raises: None
    """
    # TODO(rama) Error Handling to be added as part of the docstring
    # description

    # Avoid the import-time-evaluated default `sys.argv[1:]`; snapshot
    # the arguments when the function is actually called.
    if argv is None:
        argv = sys.argv[1:]

    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = 'pmain'
    t = threading.current_thread()
    t.name = 'tmain'
    ak_cfg.parse_config(argv)
    log.setup(cfg.CONF, 'akanda-rug')
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    neutron = neutron_api.Neutron(cfg.CONF)

    # TODO(mark): develop better way restore after machine reboot
    # neutron.purge_management_interface()

    # bring the mgt tap interface up
    neutron.ensure_local_service_port()

    # bring the external port
    if cfg.CONF.plug_external_port:
        neutron.ensure_local_external_port()

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing. (None, None) is the shutdown
    # sentinel consumed by shuffle_notifications.
    def _stop_processing(*args):
        notification_queue.put((None, None))
    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={
            'notification_queue': notification_queue
        },
        name='notification-listener',
    )
    notification_proc.start()

    # The servers bind to a bare address, so drop the /prefix suffix.
    mgt_ip_address = neutron_api.get_local_service_ip(cfg.CONF).split('/')[0]
    metadata_proc = multiprocessing.Process(
        target=metadata.serve,
        args=(mgt_ip_address,),
        name='metadata-proxy'
    )
    metadata_proc.start()

    # Local import — presumably breaks an import cycle with the api
    # package; NOTE(review): verify before moving to module level.
    from akanda.rug.api import rug as rug_api
    rug_api_proc = multiprocessing.Process(
        target=rug_api.serve,
        args=(mgt_ip_address,),
        name='rug-api'
    )
    rug_api_proc.start()

    # Set up the notifications publisher
    Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled
                 else notifications.NoopPublisher)
    publisher = Publisher(
        topic=cfg.CONF.ceilometer.topic,
    )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(
        worker.Worker,
        notifier=publisher
    )

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(
        worker_factory=worker_factory,
    )

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        # Orderly teardown: scheduler first, then publisher, then the
        # helper subprocesses.
        LOG.info(_LI('Stopping scheduler.'))
        sched.stop()
        LOG.info(_LI('Stopping notification publisher.'))
        publisher.stop()

        # Terminate the subprocesses
        for subproc in [notification_proc, metadata_proc, rug_api_proc]:
            LOG.info(_LI('Stopping %s.'), subproc.name)
            subproc.terminate()