def test_warning_when_rpc_transport(self, log):
    """Using an RPC transport for notifications must log a warning."""
    transport = oslo_messaging.get_rpc_transport(self.conf)
    target = oslo_messaging.Target(topic='foo')
    endpoints = [object()]
    oslo_messaging.get_notification_listener(transport, [target], endpoints)
    # The listener factory should detect the RPC transport and warn once.
    log.warning.assert_called_once_with(
        "Using RPC transport for notifications. Please use "
        "get_notification_transport to obtain a "
        "notification transport instance.")
def test_warning_when_rpc_transport(self, log):
    """Using an RPC transport for notifications must log a warning."""
    transport = oslo_messaging.get_rpc_transport(self.conf)
    target = oslo_messaging.Target(topic='foo')
    endpoints = [object()]
    oslo_messaging.get_notification_listener(
        transport, [target], endpoints)
    # The listener factory should detect the RPC transport and warn once.
    log.warning.assert_called_once_with(
        "Using RPC transport for notifications. Please use "
        "get_notification_transport to obtain a "
        "notification transport instance.")
def test_unknown_executor(self):
    """An unknown executor name must raise ExecutorLoadFailure."""
    transport = oslo_messaging.get_transport(self.conf, url="fake:")
    try:
        oslo_messaging.get_notification_listener(transport, [], [],
                                                 executor="foo")
    except Exception as ex:
        self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure)
        self.assertEqual("foo", ex.executor)
    else:
        # self.assertTrue(False) gives no context on failure;
        # self.fail() is the explicit idiom for "this must not be reached".
        self.fail("ExecutorLoadFailure was not raised")
def test_unknown_executor(self):
    """An unknown executor name must raise ExecutorLoadFailure."""
    transport = oslo_messaging.get_transport(self.conf, url='fake:')
    try:
        oslo_messaging.get_notification_listener(transport, [], [],
                                                 executor='foo')
    except Exception as ex:
        self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure)
        self.assertEqual('foo', ex.executor)
    else:
        # self.assertTrue(False) gives no context on failure;
        # self.fail() is the explicit idiom for "this must not be reached".
        self.fail('ExecutorLoadFailure was not raised')
def start_keystone_listener(app):
    """Listen for keystone notifications and block until stopped.

    Stores *app* in the module-global ``kube_app`` so the notification
    endpoint can reach it.  Returns immediately when no transport URL
    is configured.
    """
    global kube_app
    kube_app = app
    conf = cfg.ConfigOpts()
    conf.transport_url = get_transport_url()
    if conf.transport_url is None:
        # Nothing to listen on; bail out silently.
        return
    transport = oslo_messaging.get_rpc_transport(conf)
    targets = [
        oslo_messaging.Target(exchange='keystone',
                              topic='notifications',
                              fanout=True),
    ]
    endpoints = [
        NotificationEndpoint(),
    ]
    pool = "sysinv-keystone-listener-workers"
    server = oslo_messaging.get_notification_listener(transport,
                                                      targets,
                                                      endpoints,
                                                      pool=pool)
    LOG.info("Sysinv keystone listener started!")
    server.start()
    # Block the calling thread until the listener is stopped elsewhere.
    server.wait()
def setup_sg_rpc_callbacks(self):
    """Subscribe to security-group registry events and to the wildcard
    notifications topics (default and keystone exchanges)."""
    # following way to register call back functions start in kilo
    self._create_sg_f = self.bsn_create_sg_callback
    self._delete_sg_f = self.bsn_delete_sg_callback
    self._update_sg_f = self.bsn_update_sg_callback
    self._create_sg_rule_f = self.bsn_create_sg_rule_callback
    self._delete_sg_rule_f = self.bsn_delete_sg_rule_callback
    registry.subscribe(self._create_sg_f, resources.SECURITY_GROUP,
                       events.AFTER_CREATE)
    registry.subscribe(self._delete_sg_f, resources.SECURITY_GROUP,
                       events.AFTER_DELETE)
    registry.subscribe(self._update_sg_f, resources.SECURITY_GROUP,
                       events.AFTER_UPDATE)
    registry.subscribe(self._create_sg_rule_f,
                       resources.SECURITY_GROUP_RULE, events.AFTER_CREATE)
    registry.subscribe(self._delete_sg_rule_f,
                       resources.SECURITY_GROUP_RULE, events.AFTER_DELETE)
    # the above does not cover the cases where security groups are
    # initially created or when they are deleted since those actions
    # aren't needed by the L2 agent. In order to receive those, we
    # subscribe to the notifications topic that receives all of the
    # API create/update/delete events.
    # Notifications are published at the 'info' level so they will result
    # in a call to the 'info' function below. From there we can check
    # the event type and determine what to do from there.
    target = oslo_messaging.Target(topic='#', server=cfg.CONF.host)
    keystone_target = oslo_messaging.Target(
        topic='#', exchange='keystone', server=cfg.CONF.host)
    self.listener = oslo_messaging.get_notification_listener(
        n_rpc.TRANSPORT, [target, keystone_target], [self],
        executor='eventlet', allow_requeue=False)
    self.listener.start()
def main():
    """Run the 'join' notification listener until interrupted."""
    register_keystoneauth_opts(CONF)
    CONF(sys.argv[1:], version='1.0.10',
         default_config_files=config.find_config_files())
    logging.setup(CONF, 'join')
    transport = oslo_messaging.get_transport(CONF)
    targets = [oslo_messaging.Target(topic='notifications')]
    endpoints = [NotificationEndpoint()]
    server = oslo_messaging.get_notification_listener(transport,
                                                      targets,
                                                      endpoints,
                                                      executor='threading',
                                                      allow_requeue=True)
    LOG.info("Starting")
    server.start()
    try:
        # Keep the main thread alive; the listener runs in its own threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        LOG.info("Stopping, be patient")
        server.stop()
        server.wait()
def start(self):
    """Start the RPC server, the periodic-task timer and, when enabled,
    the nova versioned-notification listener."""
    super(RPCService, self).start()
    target = messaging.Target(topic=self.topic, server=self.host)
    endpoints = [self.manager]
    serializer = objects_base.KongmingObjectSerializer()
    self.rpcserver = rpc.get_server(target, endpoints, serializer)
    self.rpcserver.start()
    admin_context = context.get_admin_context()
    self.tg.add_dynamic_timer(
        self.manager.periodic_tasks,
        periodic_interval_max=CONF.periodic_interval,
        context=admin_context)
    LOG.info('Created RPC server for service %(service)s on host '
             '%(host)s.', {'service': self.topic, 'host': self.host})
    # NOTE(review): attribute is spelled 'init_notification_listner'
    # (sic) — presumably defined that way elsewhere in the class; verify.
    if self.init_notification_listner:
        transport = messaging.get_notification_transport(CONF)
        targets = [
            messaging.Target(topic='versioned_notifications',
                             exchange='nova')
        ]
        endpoints = [
            notification_handler.NotificationEndpoint()
        ]
        self.notification_listener = messaging.get_notification_listener(
            transport, targets, endpoints, executor='threading',
            pool='kongming-notification-handler')
        self.notification_listener.start()
def ListenerProc(exchange, project_id, cluster_id, recover_action):
    """Thread procedure for running an event listener.

    :param exchange: The control exchange for a target service.
    :param project_id: The ID of the project to filter.
    :param cluster_id: The ID of the cluster to filter.
    :param recover_action: The health policy action name.
    """
    transport = messaging.get_notification_transport(cfg.CONF)
    # Pick the endpoint type by exchange: nova gets its own endpoint,
    # anything else is treated as heat.
    if exchange == cfg.CONF.health_manager.nova_control_exchange:
        endpoint = nova_endpoint.NovaNotificationEndpoint(
            project_id, cluster_id, recover_action)
    else:
        endpoint = heat_endpoint.HeatNotificationEndpoint(
            project_id, cluster_id, recover_action)
    listener = messaging.get_notification_listener(
        transport, [endpoint.target], [endpoint],
        executor='threading', pool='senlin-listeners')
    listener.start()
    # NOTE(review): unlike similar listener procs, there is no
    # listener.wait() here — confirm the thread is kept alive elsewhere.
def ListenerProc(exchange, project_id, cluster_id, recover_action):
    """Thread procedure for running an event listener.

    :param exchange: The control exchange for a target service.
    :param project_id: The ID of the project to filter.
    :param cluster_id: The ID of the cluster to filter.
    :param recover_action: The health policy action name.
    """
    transport = messaging.get_notification_transport(cfg.CONF)
    if exchange == cfg.CONF.health_manager.nova_control_exchange:
        # Nova publishes versioned notifications on a dedicated topic.
        targets = [
            messaging.Target(topic='versioned_notifications',
                             exchange=exchange),
        ]
        endpoints = [
            NovaNotificationEndpoint(project_id, cluster_id,
                                     recover_action),
        ]
    else:
        # heat notification
        targets = [
            messaging.Target(topic='notifications', exchange=exchange),
        ]
        endpoints = [
            HeatNotificationEndpoint(project_id, cluster_id,
                                     recover_action),
        ]
    listener = messaging.get_notification_listener(
        transport, targets, endpoints,
        executor='threading', pool="senlin-listeners")
    listener.start()
def _setup_listener(self, transport, endpoints, targets=None, pool=None,
                    batch=False):
    """Start a (batch) notification listener wrapped in a tracker thread.

    :param batch: falsy for a plain listener, or a (batch_size,
        batch_timeout) pair for a batch listener.
    :returns: the started RestartableServerThread.
    """
    if pool is None:
        tracker_name = '__default__'
    else:
        tracker_name = pool
    if targets is None:
        targets = [oslo_messaging.Target(topic='testtopic')]
    # One tracker per pool name; the tracker endpoint is dispatched first.
    tracker = self.trackers.setdefault(
        tracker_name, self.ThreadTracker())
    if batch:
        listener = oslo_messaging.get_batch_notification_listener(
            transport, targets=targets, endpoints=[tracker] + endpoints,
            allow_requeue=True, pool=pool, executor='eventlet',
            batch_size=batch[0], batch_timeout=batch[1])
    else:
        listener = oslo_messaging.get_notification_listener(
            transport, targets=targets, endpoints=[tracker] + endpoints,
            allow_requeue=True, pool=pool, executor='eventlet')
    thread = RestartableServerThread(listener)
    tracker.start(thread)
    return thread
def update_catalog(self, config):
    """Return the CUPS metric catalog, starting the oslo listener once.

    :param config: plugin config mapping; must contain "transport_url".
    :returns: list of snap.Metric catalog entries.
    """
    LOG.debug("GetMetricTypes called")
    # NOTE(review): this loop only ever starts the listener; once
    # self.oslo_listener is set it spins while init_lock stays truthy —
    # confirm init_lock is cleared elsewhere.
    while self.init_lock:
        if not self.oslo_listener:
            LOG.debug("Starting oslo.messaging listener")
            self.oslo_listener = messaging.get_notification_listener(
                messaging.get_transport(
                    cfg.CONF, config["transport_url"]),
                [messaging.Target(topic='notifications')],
                [NotificationHandler()],
                'threading',  # executor (passed positionally)
                allow_requeue=True)
            self.oslo_listener.start()
    metrics = []
    for key in ("cpu_cstate", "io_bandwith", "memory_bandwith"):
        metric = snap.Metric(
            namespace=[
                snap.NamespaceElement(value="intel"),
                snap.NamespaceElement(value="node_manager"),
                snap.NamespaceElement(value="cups"),
                snap.NamespaceElement(value=key)
            ],
            version=1,
            tags={"mtype": "gauge"},
            description="CUPS {}".format(key.replace('_', ' ')),
        )
        metric.namespace.add_dynamic_element("uuid", "node UUID")
        metrics.append(metric)
    return metrics
def get_listeners():
    """Build one notification listener per configured transport URL.

    URLs that fail to parse are logged and skipped.  Targets are the
    cross product of the configured exchanges and topics (empty values
    are ignored).
    """
    endpoints = [GenericEndpoint()]
    listeners = list()
    notifications_urls = plugin_settings.NOTIFICATIONS_URL or []
    for url in notifications_urls:
        try:
            parsed_url = oslo_messaging.TransportURL.parse(cfg.CONF, url)
        except Exception as e:
            # A bad URL should not prevent the other listeners from starting.
            LOG.exception(e)
            continue
        transport = oslo_messaging.get_notification_transport(
            cfg.CONF, url=parsed_url)
        targets = list()
        for exchange in plugin_settings.NOTIFICATIONS_EXCHANGE:
            for topic in plugin_settings.NOTIFICATIONS_TOPIC:
                if topic and exchange:
                    targets.append(
                        oslo_messaging.Target(topic=topic,
                                              exchange=exchange))
        listener = oslo_messaging.get_notification_listener(
            transport=transport, targets=targets, endpoints=endpoints,
            allow_requeue=True, pool=plugin_settings.NOTIFICATIONS_POOL,
            executor=EXECUTOR)
        listeners.append(listener)
    return listeners
def setup_sg_rpc_callbacks(self):
    """Subscribe to security-group registry events and to the wildcard
    notifications topics (default and keystone exchanges)."""
    # following way to register call back functions start in kilo
    self._create_sg_f = self.bsn_create_sg_callback
    self._delete_sg_f = self.bsn_delete_sg_callback
    self._update_sg_f = self.bsn_update_sg_callback
    self._create_sg_rule_f = self.bsn_create_sg_rule_callback
    self._delete_sg_rule_f = self.bsn_delete_sg_rule_callback
    registry.subscribe(self._create_sg_f, resources.SECURITY_GROUP,
                       events.AFTER_CREATE)
    registry.subscribe(self._delete_sg_f, resources.SECURITY_GROUP,
                       events.AFTER_DELETE)
    registry.subscribe(self._update_sg_f, resources.SECURITY_GROUP,
                       events.AFTER_UPDATE)
    registry.subscribe(self._create_sg_rule_f,
                       resources.SECURITY_GROUP_RULE, events.AFTER_CREATE)
    registry.subscribe(self._delete_sg_rule_f,
                       resources.SECURITY_GROUP_RULE, events.AFTER_DELETE)
    # the above does not cover the cases where security groups are
    # initially created or when they are deleted since those actions
    # aren't needed by the L2 agent. In order to receive those, we
    # subscribe to the notifications topic that receives all of the
    # API create/update/delete events.
    # Notifications are published at the 'info' level so they will result
    # in a call to the 'info' function below. From there we can check
    # the event type and determine what to do from there.
    target = oslo_messaging.Target(topic='#', server=cfg.CONF.host)
    keystone_target = oslo_messaging.Target(topic='#',
                                            exchange='keystone',
                                            server=cfg.CONF.host)
    self.listener = oslo_messaging.get_notification_listener(
        n_rpc.TRANSPORT, [target, keystone_target], [self],
        executor='eventlet', allow_requeue=False)
    self.listener.start()
def start(self):
    """Start the RPC server, the periodic-task timer and, when enabled,
    the nova versioned-notification listener."""
    super(RPCService, self).start()
    target = messaging.Target(topic=self.topic, server=self.host)
    endpoints = [self.manager]
    serializer = objects_base.KongmingObjectSerializer()
    self.rpcserver = rpc.get_server(target, endpoints, serializer)
    self.rpcserver.start()
    admin_context = context.get_admin_context()
    self.tg.add_dynamic_timer(self.manager.periodic_tasks,
                              periodic_interval_max=CONF.periodic_interval,
                              context=admin_context)
    LOG.info(
        'Created RPC server for service %(service)s on host '
        '%(host)s.', {
            'service': self.topic,
            'host': self.host
        })
    # NOTE(review): attribute is spelled 'init_notification_listner'
    # (sic) — presumably defined that way elsewhere in the class; verify.
    if self.init_notification_listner:
        transport = messaging.get_notification_transport(CONF)
        targets = [
            messaging.Target(topic='versioned_notifications',
                             exchange='nova')
        ]
        endpoints = [notification_handler.NotificationEndpoint()]
        self.notification_listener = messaging.get_notification_listener(
            transport, targets, endpoints, executor='threading',
            pool='kongming-notification-handler')
        self.notification_listener.start()
def start_rabbitmq_client():
    """Start Rabbitmq client to listen instance notifications from Nova"""
    cfg = get_rabbit_config()
    rabbit_url = "rabbit://%s:%s@%s:%s/%s" % (cfg['user_id'],
                                              cfg['password'],
                                              cfg['host'],
                                              cfg['port'],
                                              cfg['virt_host'])
    # NOTE(review): this logs the broker URL including the password —
    # consider masking credentials before logging.
    LOG.info(rabbit_url)
    target = oslo_messaging.Target(exchange="nova",
                                   topic="notifications",
                                   server="info",
                                   version="2.1",
                                   fanout=True)
    transport = oslo_messaging.get_notification_transport(CONF,
                                                          url=rabbit_url)
    endpoints = [
        InstCreateNotificationEp(),
        InstResizeNotificationEp(),
        InstDelNotificationEp()
    ]
    # "threading" is the executor argument (passed positionally).
    server = oslo_messaging.get_notification_listener(transport, [target],
                                                      endpoints, "threading")
    # Run the listener loop in a background thread so this call returns.
    thread = threading.Thread(target=rpc_work, args=(server, ))
    thread.start()
    LOG.info("Rabbitmq Client Started!")
    return server
def start_rabbitmq_client():
    """Start Rabbitmq client to listen instance notifications from Nova"""
    cfg = CONF.amqp
    rabbit_url = "rabbit://%s:%s@%s:%s/%s" % (cfg['user_id'],
                                              cfg['password'],
                                              cfg['host'],
                                              cfg['port'],
                                              cfg['virt_host'])
    topic = cfg['topic']
    # NOTE(review): this logs the broker URL including the password —
    # consider masking credentials before logging.
    LOG.info(rabbit_url)
    target = oslo_messaging.Target(exchange="nova",
                                   topic=topic,
                                   server="info",
                                   version="2.1",
                                   fanout=True)
    transport = oslo_messaging.get_notification_transport(CONF,
                                                          url=rabbit_url)
    # Choose the payload decoder based on whether nova publishes
    # versioned or unversioned notifications on this topic.
    payload_decoder = UnversionedPayloadDecoder()
    if topic == 'versioned_notifications':
        payload_decoder = VersionedPayloadDecoder()
    endpoints = [
        InstanceOnlineNotificationEndpoint(payload_decoder),
        InstanceOfflineNotificationEndpoint(payload_decoder),
    ]
    # "threading" is the executor argument (passed positionally).
    server = oslo_messaging.get_notification_listener(transport, [target],
                                                      endpoints, "threading",
                                                      allow_requeue=True)
    # Run the listener loop in a background thread so this call returns.
    thread = threading.Thread(target=rpc_work, args=(server, ))
    thread.start()
    LOG.info("Rabbitmq Client Started!")
    return server
def _setup_listener(self, transport, endpoints, targets=None, pool=None,
                    batch=False):
    """Start a (batch) notification listener wrapped in a tracker thread.

    :param batch: falsy for a plain listener, or a (batch_size,
        batch_timeout) pair for a batch listener.
    :returns: the started RestartableServerThread.
    """
    if pool is None:
        tracker_name = '__default__'
    else:
        tracker_name = pool
    if targets is None:
        targets = [oslo_messaging.Target(topic='testtopic')]
    # One tracker per pool name; the tracker endpoint is dispatched first.
    tracker = self.trackers.setdefault(
        tracker_name, self.ThreadTracker())
    if batch:
        listener = oslo_messaging.get_batch_notification_listener(
            transport, targets=targets, endpoints=[tracker] + endpoints,
            allow_requeue=True, pool=pool, executor='eventlet',
            batch_size=batch[0], batch_timeout=batch[1])
    else:
        listener = oslo_messaging.get_notification_listener(
            transport, targets=targets, endpoints=[tracker] + endpoints,
            allow_requeue=True, pool=pool, executor='eventlet')
    thread = RestartableServerThread(listener)
    tracker.start(thread)
    return thread
def build_notification_handler(self, topic_names, endpoints=()):
    """Create a notification listener for the given topic names.

    Wraps each topic in a Target and returns a non-requeueing eventlet
    listener using the request-context JSON serializer.
    """
    context_serializer = rpc.RequestContextSerializer(
        rpc.JsonPayloadSerializer())
    listener_targets = []
    for name in topic_names:
        listener_targets.append(om.Target(topic=name))
    return om.get_notification_listener(
        self.notification_transport,
        listener_targets,
        endpoints,
        executor='eventlet',
        serializer=context_serializer,
        allow_requeue=False)
def getNotificationListener(self, targets, endpoints):
    """Return an eventlet notification listener on self.TRANSPORT.

    Requeueing of notifications is enabled.
    """
    assert self.TRANSPORT is not None
    return oslo_msg.get_notification_listener(
        self.TRANSPORT,
        targets,
        endpoints,
        allow_requeue=True,
        executor="eventlet")
def __init__(self, pool):
    """Create an eventlet listener on the 'notifications' topic.

    :param pool: passed through to the NotificationEndpoint.
    """
    ### code from oslo_messaging/notify/listener.py
    self.transport = oslo_messaging.get_notification_transport(cfg.CONF)
    self.targets = [oslo_messaging.Target(topic='notifications')]
    self.endpoints = [NotificationEndpoint(pool)]
    ### TODO : use config file for pool
    self.server = oslo_messaging.get_notification_listener(
        self.transport, self.targets, self.endpoints, executor='eventlet')
def getNotificationListener(self, targets, endpoints):
    """Return an eventlet notification listener on self.TRANSPORT.

    Requeueing of notifications is disabled.
    """
    assert self.TRANSPORT is not None
    return oslo_msg.get_notification_listener(
        self.TRANSPORT,
        targets,
        endpoints,
        allow_requeue=False,
        executor="eventlet")
def get_listener(targets, endpoints, serializer=None):
    """Return an eventlet notification listener on the global TRANSPORT.

    A JsonPayloadSerializer is used when no serializer is supplied.
    """
    assert TRANSPORT is not None
    payload_serializer = (JsonPayloadSerializer()
                          if serializer is None else serializer)
    return messaging.get_notification_listener(
        TRANSPORT, targets, endpoints,
        executor='eventlet', serializer=payload_serializer)
def __init__(self, topic, url, endpoints, exchange=None, fanout=False):
    """Build a notification listener around a single endpoint object."""
    super(DfaNotifcationListener, self).__init__()
    bus = messaging.get_transport(cfg.CONF, url=url)
    listen_targets = [
        messaging.Target(exchange=exchange, fanout=fanout, topic=topic)
    ]
    # The single endpoint object is wrapped in the list the API expects.
    self._listener = messaging.get_notification_listener(
        bus, listen_targets, [endpoints])
def __init__(self):
    """Wire up a blocking notification listener in pool 'test'."""
    self.transport = messaging.get_transport(cfg.CONF)
    self.targets = [messaging.Target(topic='notification')]
    self.endpoints = [NotificationHandler()]
    # NOTE(review): the 'blocking' executor is deprecated in newer
    # oslo.messaging releases — confirm before upgrading.
    self.server = messaging.get_notification_listener(self.transport,
                                                      self.targets,
                                                      self.endpoints,
                                                      executor='blocking',
                                                      pool='test')
def get_listener(targets, endpoints, serializer=None):
    """Create an eventlet notification listener on the global TRANSPORT.

    Falls back to a JsonPayloadSerializer when none is given.
    """
    assert TRANSPORT is not None
    chosen_serializer = serializer
    if chosen_serializer is None:
        chosen_serializer = JsonPayloadSerializer()
    listener = messaging.get_notification_listener(
        TRANSPORT,
        targets,
        endpoints,
        executor='eventlet',
        serializer=chosen_serializer)
    return listener
def get_notification_listener(transport, targets, endpoints,
                              allow_requeue=False):
    """Return a configured oslo_messaging notification listener."""
    listener = oslo_msg.get_notification_listener(
        transport, targets, endpoints, allow_requeue=allow_requeue)
    return listener
def __init__(self):
    """Create a threading listener on ceilometer 'event' notifications."""
    self._zk = None
    transport = oslo_messaging.get_notification_transport(CONF)
    targets = [oslo_messaging.Target(exchange='ceilometer', topic='event')]
    # NOTE(review): self.zk is read here while only self._zk is assigned
    # above — presumably a lazy property on the class; verify.
    endpoints = [NotificationHandler(self.zk)]
    server = oslo_messaging.get_notification_listener(transport,
                                                      targets,
                                                      endpoints,
                                                      executor='threading')
    self.server = server
def get_notification_listener():
    """Build a threading-based listener for murano notifications.

    The server name is a fresh UUID so each process gets its own queue.
    """
    handler_endpoints = [report_notification, track_instance,
                         untrack_instance]
    bus = messaging.get_notification_transport(CONF)
    server_target = target.Target(topic='murano', server=str(uuid.uuid4()))
    return messaging.get_notification_listener(
        bus, [server_target], handler_endpoints, executor='threading')
def __init__(self, topic, url, endpoints, exchange=None, fanout=False):
    """Build a notification listener around a single endpoint object."""
    super(DfaNotifcationListener, self).__init__()
    bus = messaging.get_transport(cfg.CONF, url=url)
    listen_targets = [messaging.Target(exchange=exchange,
                                       fanout=fanout,
                                       topic=topic)]
    # The single endpoint object is wrapped in the list the API expects.
    self._listener = messaging.get_notification_listener(
        bus, listen_targets, [endpoints])
def _set_up_listener(transport, agent_id):
    """Create an eventlet listener on the hash-ring member-manager topic.

    The pool is keyed by agent id so every agent sees the notifications.
    """
    manager_targets = [
        oslo_messaging.Target(topic='ironic-neutron-agent-member-manager')
    ]
    manager_endpoints = [HashRingMemberManagerNotificationEndpoint()]
    return oslo_messaging.get_notification_listener(
        transport, manager_targets, manager_endpoints,
        executor='eventlet', pool=agent_id)
def test_no_target_topic(self):
    """Starting a listener on a topic-less Target raises InvalidTarget."""
    transport = oslo_messaging.get_transport(self.conf, url="fake:")
    listener = oslo_messaging.get_notification_listener(
        transport, [oslo_messaging.Target()], [mock.Mock()])
    try:
        listener.start()
    except Exception as ex:
        self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
    else:
        # self.assertTrue(False) gives no context on failure;
        # self.fail() is the explicit idiom for "this must not be reached".
        self.fail("InvalidTarget was not raised")
def start(self):
    """Start one listener covering every (topic, exchange) pair."""
    super(ListenerService, self).start()
    transport = oslo_messaging.get_transport(cfg.CONF)
    targets = [
        oslo_messaging.Target(topic=pl_topic, exchange=pl_exchange)
        for pl_topic, pl_exchange in self.topics_exchanges_set
    ]
    endpoints = [NotificationEndpoint(self.plugins)]
    listener = oslo_messaging.get_notification_listener(transport,
                                                        targets,
                                                        endpoints)
    listener.start()
    self.listeners.append(listener)
def setUp(self):
    """Start a notification listener fixture on self.topics.

    Also listens on a private topic (self.name) used for internal
    control notifications, and waits for the transport to settle.
    """
    super(NotificationFixture, self).setUp()
    targets = [oslo_messaging.Target(topic=t) for t in self.topics]
    # add a special topic for internal notifications
    targets.append(oslo_messaging.Target(topic=self.name))
    transport = self.useFixture(TransportFixture(self.url))
    self.server = oslo_messaging.get_notification_listener(
        transport.transport, targets, [self])
    self._ctrl = self.notifier('internal', topic=self.name)
    self._start()
    transport.wait()
def setUp(self):
    """Start a notification listener fixture on self.topics.

    Also listens on a private topic (self.name) used for internal
    control notifications.
    """
    super(NotificationFixture, self).setUp()
    targets = [oslo_messaging.Target(topic=t) for t in self.topics]
    # add a special topic for internal notifications
    targets.append(oslo_messaging.Target(topic=self.name))
    self.server = oslo_messaging.get_notification_listener(
        self.transport, targets, [self])
    self._ctrl = self.notifier('internal', topic=self.name)
    self._start()
def start(self):
    """Start listening for glance notifications."""
    super(ListenerService, self).start()
    bus = oslo_messaging.get_transport(cfg.CONF)
    glance_targets = [
        oslo_messaging.Target(topic="notifications", exchange="glance")
    ]
    handler_endpoints = [NotificationEndpoint()]
    notification_server = oslo_messaging.get_notification_listener(
        bus, glance_targets, handler_endpoints)
    notification_server.start()
    self.listeners.append(notification_server)
def get_notification_listener(targets, endpoints, serializer=None,
                              pool=None):
    """Return an eventlet listener on the global NOTIFICATION_TRANSPORT.

    Falls back to a JsonPayloadSerializer when none is supplied.
    """
    if NOTIFICATION_TRANSPORT is None:
        raise AssertionError("'NOTIFICATION_TRANSPORT' must not be None")
    payload_serializer = (JsonPayloadSerializer()
                          if serializer is None else serializer)
    return messaging.get_notification_listener(
        NOTIFICATION_TRANSPORT, targets, endpoints,
        executor='eventlet', pool=pool, serializer=payload_serializer)
def start(self):
    """Start the murano notification listener service."""
    handler_endpoints = [report_notification, track_instance,
                         untrack_instance]
    bus = messaging.get_transport(CONF)
    # A fresh UUID as server name gives this process its own queue.
    server_target = target.Target(topic='murano', server=str(uuid.uuid4()))
    self.server = messaging.get_notification_listener(
        bus, [server_target], handler_endpoints, executor='eventlet')
    self.server.start()
    super(NotificationService, self).start()
def test_no_target_topic(self):
    """Starting a listener on a topic-less Target raises InvalidTarget."""
    transport = oslo_messaging.get_transport(self.conf, url='fake:')
    listener = oslo_messaging.get_notification_listener(
        transport, [oslo_messaging.Target()], [mock.Mock()])
    try:
        listener.start()
    except Exception as ex:
        self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
    else:
        # self.assertTrue(False) gives no context on failure;
        # self.fail() is the explicit idiom for "this must not be reached".
        self.fail('InvalidTarget was not raised')
def test_constructor(self):
    """A default listener exposes conf/transport/dispatcher and uses
    the default 'blocking' executor."""
    transport = oslo_messaging.get_transport(self.conf, url="fake:")
    target = oslo_messaging.Target(topic="foo")
    endpoints = [object()]
    listener = oslo_messaging.get_notification_listener(transport,
                                                        [target],
                                                        endpoints)
    self.assertIs(listener.conf, self.conf)
    self.assertIs(listener.transport, transport)
    self.assertIsInstance(listener.dispatcher,
                          dispatcher.NotificationDispatcher)
    self.assertIs(listener.dispatcher.endpoints, endpoints)
    self.assertEqual("blocking", listener.executor)
def main(argv=None):
    """CLI entry point: listen for notifications on the given topics
    until interrupted.  Returns -1 on missing topics, 0 on clean exit.
    """
    _usage = """Usage: %prog [options] <target, ...>"""
    logging.warning("listener.py has been superseded by notifier")
    parser = optparse.OptionParser(usage=_usage)
    parser.add_option("--name", action="store", default=uuid.uuid4().hex)
    parser.add_option("--url", action="store", default="qpid://localhost")
    parser.add_option("--exchange", action="store")
    parser.add_option("--namespace", action="store")
    parser.add_option("--pool", action="store")
    parser.add_option("--quiet", action="store_true", default=False,
                      help="Supress console output")
    parser.add_option("--debug", action="store_true",
                      help="Enable debug logging.")
    parser.add_option("--oslo-config", type="string",
                      help="the oslo.messaging configuration file.")
    opts, topics = parser.parse_args(args=argv)
    if not topics:
        if not opts.quiet:
            print("missing topics!")
        return -1
    if not opts.quiet:
        print("listener %s: url=%s, topics=%s" % (opts.name, opts.url,
                                                  topics))
    if opts.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARN)
    if opts.oslo_config:
        if opts.debug:
            print("Loading config file %s" % opts.oslo_config)
        cfg.CONF(["--config-file", opts.oslo_config])
    targets = [oslo_messaging.Target(exchange=opts.exchange,
                                     topic=t,
                                     namespace=opts.namespace)
               for t in topics]
    transport = oslo_messaging.get_transport(cfg.CONF, url=opts.url)
    listener = oslo_messaging.get_notification_listener(
        transport, targets, [TestNotificationEndpoint(opts.name)],
        pool=opts.pool)
    try:
        listener.start()
        # Keep the main thread alive; the listener runs in its own threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("Stopping..")
        listener.stop()
        listener.wait()
    transport.cleanup()
    return 0
def ListenerProc(exchange, project_id, cluster_id):
    """Run a notification listener on *exchange* and block until stopped."""
    transport = messaging.get_notification_transport(cfg.CONF)
    targets = [
        messaging.Target(topic='notifications', exchange=exchange),
    ]
    endpoints = [
        NotificationEndpoint(project_id, cluster_id),
    ]
    listener = messaging.get_notification_listener(
        transport, targets, endpoints, pool="senlin-listeners")
    listener.start()
    # Block the calling thread until the listener is stopped elsewhere.
    listener.wait()
def get_server_for_sp(sp):
    """Get notification listener for a particular service provider.

    The server can be run in the background under eventlet using .start()
    """
    sp_conf = config.get_conf_for_sp(sp)
    transport = oslo_messaging.get_notification_transport(
        CONF, sp_conf.messagebus)
    notification_targets = [oslo_messaging.Target(topic='notifications')]
    sp_endpoints = get_endpoints_for_sp(sp_conf.sp_name)
    return oslo_messaging.get_notification_listener(
        transport, notification_targets, sp_endpoints,
        executor='eventlet')
def get_server_for_sp(sp):
    """Get notification listener for a particular service provider.

    The server can be run in the background under eventlet using .start()
    """
    sp_conf = config.get_conf_for_sp(sp)
    transport = oslo_messaging.get_notification_transport(
        CONF, sp_conf.messagebus)
    notification_targets = [oslo_messaging.Target(topic='notifications')]
    return oslo_messaging.get_notification_listener(
        transport,
        notification_targets,
        get_endpoints_for_sp(sp_conf.sp_name),
        executor='eventlet')
def start(self):
    """Start the murano notification listener service."""
    handler_endpoints = [report_notification, track_instance,
                         untrack_instance]
    bus = messaging.get_notification_transport(CONF)
    # A fresh UUID as server name gives this process its own queue.
    server_target = target.Target(topic='murano', server=str(uuid.uuid4()))
    self.server = messaging.get_notification_listener(
        bus, [server_target], handler_endpoints, executor='eventlet')
    self.server.start()
    super(NotificationService, self).start()
def get_listeners(self, handler):
    """Create one threading listener per collector transport URL.

    :param handler: endpoint object that receives the notifications.
    :returns: list of (unstarted) notification listeners.
    """
    listeners = []
    targets = [
        oslo_messaging.Target(topic=self.config.collector.topic),
    ]
    for url in self.config.collector.transport_url:
        LOG.debug('Creating listener for %s', url)
        transport = self._get_transport(url)
        listeners.append(oslo_messaging.get_notification_listener(
            transport=transport,
            targets=targets,
            endpoints=[handler],
            executor='threading'))
    return listeners
def start(self):
    """Start listening for glance notifications."""
    super(ListenerService, self).start()
    bus = oslo_messaging.get_transport(cfg.CONF)
    glance_targets = [
        oslo_messaging.Target(topic="notifications", exchange="glance")
    ]
    handler_endpoints = [NotificationEndpoint()]
    notification_server = oslo_messaging.get_notification_listener(
        bus, glance_targets, handler_endpoints)
    notification_server.start()
    self.listeners.append(notification_server)
def initialize(self):
    """Load the network map and SG rules, then start the dvs listener."""
    self.network_map = util.create_network_map_from_config(CONF.ml2_vmware)
    self._bound_ports = set()
    session = db.get_session()
    rules = session.query(securitygroups_db.SecurityGroupRule).all()
    # Map each rule id to its owning security group for fast lookup.
    self.sgr_to_sg = {r['id']: r['security_group_id'] for r in rules}
    listener = oslo_messaging.get_notification_listener(
        n_rpc.TRANSPORT,
        targets=[oslo_messaging.Target(topic='vmware_dvs')],
        endpoints=[endpoints.SecurityGroupRuleCreateEndPoint(self),
                   endpoints.SecurityGroupRuleDeleteEndPoint(self),
                   endpoints.SecurityGroupCreateEndPoint(self),
                   endpoints.SecurityGroupDeleteEndPoint(self)],
        executor='eventlet')
    listener.start()
def start(self):
    """Start listening for glance notifications."""
    super(ListenerService, self).start()
    transport = oslo_messaging.get_transport(cfg.CONF)
    # TODO(sjmc7): This needs to come from the plugins, and from config
    # options rather than hardcoded. Refactor this out to a function
    # returning the set of topic,exchange pairs
    targets = [
        oslo_messaging.Target(topic="notifications", exchange="glance")
    ]
    endpoints = [
        NotificationEndpoint()
    ]
    listener = oslo_messaging.get_notification_listener(
        transport, targets, endpoints)
    listener.start()
    self.listeners.append(listener)
def _setup_listener(self, transport, endpoints, targets=None, pool=None):
    """Start a notification listener wrapped in a tracker thread.

    :returns: the started RestartableServerThread.
    """
    if pool is None:
        tracker_name = "__default__"
    else:
        tracker_name = pool
    if targets is None:
        targets = [oslo_messaging.Target(topic="testtopic")]
    # One tracker per pool name; the tracker endpoint is dispatched first.
    tracker = self.trackers.setdefault(tracker_name, self.ThreadTracker())
    listener = oslo_messaging.get_notification_listener(
        transport, targets=targets, endpoints=[tracker] + endpoints,
        allow_requeue=True, pool=pool
    )
    thread = RestartableServerThread(listener)
    tracker.start(thread)
    return thread
def start(self):
    """Start the events listener and keep the thread group alive."""
    super(NotificationService, self).start()
    transport = bilean_messaging.get_transport()
    targets = [
        oslo_messaging.Target(topic=tp, exchange=eg)
        for tp, eg in self.topics_exchanges_set
    ]
    endpoints = [endpoint.EventsNotificationEndpoint()]
    listener = oslo_messaging.get_notification_listener(
        transport, targets, endpoints,
        pool=CONF.listener.notifications_pool)
    listener.start()
    self.listeners.append(listener)
    # Add a dummy thread to have wait() working
    self.tg.add_timer(604800, lambda: None)
def start(self):
    """Start one pooled listener covering every (topic, exchange) pair."""
    super(ListenerService, self).start()
    transport = oslo_messaging.get_notification_transport(CONF)
    targets = [
        oslo_messaging.Target(topic=pl_topic, exchange=pl_exchange)
        for pl_topic, pl_exchange in self.topics_exchanges_set
    ]
    endpoints = [
        NotificationEndpoint(self.plugins, PipelineManager(self.plugins))
    ]
    listener = oslo_messaging.get_notification_listener(
        transport, targets, endpoints,
        executor='threading',
        pool=CONF.listener.notifications_pool)
    listener.start()
    self.listeners.append(listener)
def test_constructor(self, deprecate):
    """The default ('blocking') executor is selected and a deprecation
    warning is emitted exactly once."""
    transport = msg_notifier.get_notification_transport(
        self.conf, url='fake:')
    target = oslo_messaging.Target(topic='foo')
    endpoints = [object()]
    listener = oslo_messaging.get_notification_listener(
        transport, [target], endpoints)
    self.assertIs(listener.conf, self.conf)
    self.assertIs(listener.transport, transport)
    self.assertIsInstance(listener.dispatcher,
                          dispatcher.NotificationDispatcher)
    self.assertIs(listener.dispatcher.endpoints, endpoints)
    self.assertEqual('blocking', listener.executor_type)
    deprecate.assert_called_once_with(
        'blocking executor is deprecated. Executor default will be '
        'removed. Use explicitly threading or eventlet instead',
        removal_version='rocky', version='pike', category=FutureWarning)
def create_notification_listener(self, endpoints, exchange=None,
                                 topic='notifications'):
    """Creates an oslo.messaging notification listener associated with
    provided endpoints. Adds the resulting listener to the pool of RPC
    server threads.

    :param endpoints: list of endpoint objects that define methods for
                      processing prioritized notifications
    :param exchange: Optional control exchange to listen on. If not
                     specified, oslo_messaging defaults to 'openstack'
    :param topic: Topic on which to listen for notification events
    """
    transport = get_transport()
    target = get_target(topic=topic, fanout=False, exchange=exchange)
    # Host-scoped pool so each host gets its own copy of notifications.
    pool = 'astara.' + topic + '.' + cfg.CONF.host
    server = oslo_messaging.get_notification_listener(
        transport, [target], endpoints, pool=pool)
    LOG.debug(
        'Created RPC notification listener on topic:%s/exchange:%s.',
        topic, exchange)
    self._add_server_thread(server)
def main():
    """Consume notifications until the counter stops growing (or forever
    with --infinite-loop), then report how many messages were consumed."""
    global counter
    global infinite_loop
    global LOG
    CONF(sys.argv[1:], project="load_consumer")
    log.setup(CONF, "load_consumer")
    LOG = log.getLogger(__name__)
    transport = oslo_messaging.get_transport(cfg.CONF)
    targets = [oslo_messaging.Target(topic=CONF.notif_topic_name)]
    endpoints = [NotificationEndpoint()]
    server = oslo_messaging.get_notification_listener(transport,
                                                      targets,
                                                      endpoints)
    # Run the listener in a background thread so the main thread can poll.
    threading.Thread(target=server.start).start()
    # LOG.info('after threading')
    infinite_loop = CONF.infinite_loop
    last_counter = -1
    try:
        # Exit once no new messages arrived during the last second.
        while last_counter < counter or infinite_loop:
            last_counter = counter
            time.sleep(1)
    except OSError:
        # That is how keyboard interrupt appears here
        # NOTE(review): KeyboardInterrupt is not normally an OSError —
        # presumably this relies on monkey-patching; verify.
        pass
    server.stop()
    server.wait()
    print("Consumed %i messages" % counter)
def main():
    """Listen on <exchange> (argv[1]) with an optional pool (argv[2])
    until interrupted."""
    if len(sys.argv) < 2:
        print("Supply an exchange")
        sys.exit(0)
    exchange = sys.argv[1]
    pool = sys.argv[2] if len(sys.argv) > 2 else None
    transport = oslo_messaging.get_notification_transport(
        cfg.CONF, url='rabbit://%s:%s@%s' % (username, password, host))
    targets = [oslo_messaging.Target(topic=topic, exchange=exchange)]
    endpoints = [EP()]
    oslo_listener = oslo_messaging.get_notification_listener(
        transport, targets, endpoints, pool=pool, executor='threading')
    try:
        print("Started")
        oslo_listener.start()
        # Keep the main thread alive; the listener runs in its own threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("Stopping")
        oslo_listener.stop()
        oslo_listener.wait()
def main(argv=None):
    """CLI entry point: listen for notifications on the given topics
    until interrupted.  Returns -1 on missing topics, 0 on clean exit.
    """
    _usage = """Usage: %prog [options] <target, ...>"""
    parser = optparse.OptionParser(usage=_usage)
    parser.add_option("--name", action="store", default=uuid.uuid4().hex)
    parser.add_option("--url", action="store", default="qpid://localhost")
    parser.add_option("--exchange", action="store")
    parser.add_option("--namespace", action="store")
    parser.add_option("--pool", action="store")
    parser.add_option("--quiet", action="store_true", default=False,
                      help="Supress console output")
    opts, topics = parser.parse_args(args=argv)
    if not topics:
        if not opts.quiet:
            print("missing topics!")
        return -1
    if not opts.quiet:
        print("listener %s: url=%s, topics=%s" % (opts.name, opts.url,
                                                  topics))
    targets = [oslo_messaging.Target(exchange=opts.exchange,
                                     topic=t,
                                     namespace=opts.namespace)
               for t in topics]
    transport = oslo_messaging.get_transport(cfg.CONF, url=opts.url)
    listener = oslo_messaging.get_notification_listener(
        transport, targets, [TestNotificationEndpoint(opts.name)],
        pool=opts.pool)
    try:
        listener.start()
        # Keep the main thread alive; the listener runs in its own threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("Stopping..")
        listener.stop()
        listener.wait()
    return 0
def setup_sg_rpc_callbacks(self):
    """Consume L2-agent security-group RPC plus the API notifications
    topic for SG create/delete events."""
    # this will listen for the same notifications that the l2 agent uses.
    # This is triggered whenever security group rules change or members
    # of a security group change.
    # The functions that will be called directly correspond to the names
    # defined in the cast calls in
    # neutron/api/rpc/handlers/securitygroups_rpc.py
    self.connection = agent_rpc.create_consumers(
        [self], topics.AGENT, [[topics.SECURITY_GROUP, topics.UPDATE]])
    # the above does not cover the cases where security groups are
    # initially created or when they are deleted since those actions
    # aren't needed by the L2 agent. In order to receive those, we
    # subscribe to the notifications topic that receives all of the
    # API create/update/delete events.
    # Notifications are published at the 'info' level so they will result
    # in a call to the 'info' function below. From there we can check
    # the event type and determine what to do from there.
    target = oslo_messaging.Target(topic='notifications',
                                   server=cfg.CONF.host)
    self.listener = oslo_messaging.get_notification_listener(
        n_rpc.TRANSPORT, [target], [self],
        executor='eventlet', allow_requeue=False)
    self.listener.start()
def main():
    """Run the 'join' notification listener until interrupted."""
    register_keystoneauth_opts(CONF)
    CONF(sys.argv[1:], version='1.0.10',
         default_config_files=config.find_config_files())
    logging.setup(CONF, 'join')
    transport = oslo_messaging.get_transport(CONF)
    targets = [oslo_messaging.Target(topic='notifications')]
    endpoints = [NotificationEndpoint()]
    server = oslo_messaging.get_notification_listener(transport,
                                                      targets,
                                                      endpoints,
                                                      executor='threading',
                                                      allow_requeue=True)
    LOG.info("Starting")
    server.start()
    try:
        # Keep the main thread alive; the listener runs in its own threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        LOG.info("Stopping, be patient")
        server.stop()
        server.wait()
def _get_server(self, transport, targets):
    """Build an eventlet notification listener with self as the endpoint."""
    return oslo_messaging.get_notification_listener(
        transport.transport,
        targets,
        [self],
        executor='eventlet')