def test_unknown_executor(self):
    """A bogus executor name must raise ExecutorLoadFailure naming it."""
    transport = messaging.get_transport(self.conf, url='fake:')
    try:
        messaging.get_notification_listener(transport, [], [],
                                            executor='foo')
    except Exception as exc:
        # the failure must carry the offending executor name
        self.assertIsInstance(exc, messaging.ExecutorLoadFailure)
        self.assertEqual('foo', exc.executor)
    else:
        self.assertTrue(False)
def start(self): """Bind the UDP socket and handle incoming data.""" # ensure dispatcher is configured before starting other services self.dispatcher_manager = dispatcher.load_dispatcher_manager() self.rpc_server = None self.notification_server = None super(CollectorService, self).start() if cfg.CONF.collector.udp_address: self.tg.add_thread(self.start_udp) allow_requeue = cfg.CONF.collector.requeue_sample_on_dispatcher_error transport = messaging.get_transport(optional=True) if transport: self.rpc_server = messaging.get_rpc_server( transport, cfg.CONF.publisher_rpc.metering_topic, self) target = oslo.messaging.Target( topic=cfg.CONF.publisher_notifier.metering_topic) self.notification_server = messaging.get_notification_listener( transport, [target], [self], allow_requeue=allow_requeue) self.rpc_server.start() self.notification_server.start() if not cfg.CONF.collector.udp_address: # Add a dummy thread to have wait() working self.tg.add_timer(604800, lambda: None)
def _setup_listener(self, transport, endpoints, expect_messages,
                    targets=None, pool=None):
    """Start a tracked listener in a restartable thread and return it."""
    key = pool if pool is not None else '__default__'
    if targets is None:
        targets = [messaging.Target(topic='testtopic')]
    # one tracker per pool name; it sees every message first
    tracker = self.trackers.setdefault(
        key, self.ListenerTracker(expect_messages))
    listener = messaging.get_notification_listener(
        transport,
        targets=targets,
        endpoints=[tracker] + endpoints,
        allow_requeue=True,
        pool=pool)
    tracker.listeners.append(listener)
    thread = RestartableListenerThread(listener)
    thread.start()
    return thread
def main(self):
    """Configure logging, pick the payload backend, and serve notifications.

    Listens on the 'subscription_notifications' topic of the 'nova'
    exchange and blocks in wait() until the process is stopped.
    """
    # TODO: clean up and make more configurable!
    logger = logging.getLogger('katello_notification')
    logger.setLevel(self._get_loglevel())
    handler = logging.FileHandler(LOG_LOCATION)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - @%(filename)s %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    # quick config check
    mgmt_server = self._katello_or_spacewalk()
    if mgmt_server == 'katello':
        payload_actions = KatelloPayloadActions()
    elif mgmt_server == 'spacewalk':
        payload_actions = SpacewalkPayloadActions()
    else:
        logger.error("mgmt server not set to 'katello' or 'spacewalk', aborting")
        # BUG FIX: previously execution fell through and crashed with a
        # NameError on 'payload_actions'; abort as the log message promises.
        return
    # set up transport and listener
    transport = messaging.get_transport(cfg.CONF, url=self._get_amqp_url())
    targets = [
        messaging.Target(topic='subscription_notifications', exchange='nova'),
    ]
    endpoints = [
        NotificationEndpoint(payload_actions),
    ]
    server = messaging.get_notification_listener(transport, targets, endpoints)
    logger.info("listener initialized")
    server.start()
    server.wait()
def start(self):
    """Announce startup, then consume notifications on every topic."""
    LOG.info(_('Starting %(topic)s node (version %(version)s)'),
             {'topic': self.topic, 'version': version.version_string()})
    self.basic_config_check()
    # one target per topic; the manager handles every one of them
    targets = [messaging.Target(topic=topic) for topic in self.topic]
    endpoints = [self.manager for _ in self.topic]
    listener = messaging.get_notification_listener(
        rpc.TRANSPORT, targets, endpoints, allow_requeue=True)
    if self.periodic_enable:
        # fuzz the first run so a fleet of nodes doesn't fire in lockstep
        initial_delay = (random.randint(0, self.periodic_fuzzy_delay)
                         if self.periodic_fuzzy_delay else None)
        self.tg.add_dynamic_timer(
            self.periodic_tasks,
            initial_delay=initial_delay,
            periodic_interval_max=self.periodic_interval_max)
    listener.start()
def get_listener(targets, endpoints, serializer=None):
    """Build an eventlet notification listener on the global TRANSPORT.

    The given serializer (possibly None) is wrapped so request context is
    (de)serialized consistently.
    """
    assert TRANSPORT is not None
    wrapped = RequestContextSerializer(serializer)
    return messaging.get_notification_listener(TRANSPORT,
                                               targets,
                                               endpoints,
                                               executor='eventlet',
                                               serializer=wrapped)
def notify_listener():
    """Monkey-patch for eventlet, then serve 'notifications' forever."""
    eventlet.monkey_patch()
    endpoint = TestEndpoint()
    target = messaging.Target(topic='notifications', server='server1',
                              version='1.0')
    listener = messaging.get_notification_listener(
        TRANSPORT, [target], [endpoint], executor='eventlet')
    listener.start()
    listener.wait()
def setUp(self):
    """Create a listener covering all fixture topics plus the internal one."""
    super(NotificationFixture, self).setUp()
    all_targets = [messaging.Target(topic=name) for name in self.topics]
    # self.name is a special topic for internal notifications
    all_targets.append(messaging.Target(topic=self.name))
    self.server = messaging.get_notification_listener(
        self.transport, all_targets, [self])
    self._ctrl = self.notifier('internal', topic=self.name)
    self._start()
def get_listener(targets, endpoints, serializer=None):
    """Return an eventlet notification listener on the global TRANSPORT.

    Falls back to a JsonPayloadSerializer when none is supplied.
    """
    assert TRANSPORT is not None
    if serializer is None:
        serializer = JsonPayloadSerializer()
    return messaging.get_notification_listener(TRANSPORT,
                                               targets,
                                               endpoints,
                                               executor='eventlet',
                                               serializer=serializer)
def launch_agent_listener(transport):
    """Start the default agent notification listener and block on it."""
    target = messaging.Target(topic=cfg.CONF.agent.notifications)
    endpoint = def_agent.DefaultAgentNotificationEndPoint()
    listener = messaging.get_notification_listener(
        transport, [target], [endpoint])
    listener.start()
    time.sleep(0)  # yield so the listener's threads get a chance to run
    listener.wait()
def initialize(self):
    """Build the network map and listen for security-group rule events."""
    self.network_map = util.create_network_map_from_config(CONF.ml2_vmware)
    rule_endpoints = [
        endpoints.SecurityGroupRuleCreateEndPoint(self),
        endpoints.SecurityGroupRuleDeleteEndPoint(self),
    ]
    listener = oslo_messaging.get_notification_listener(
        n_rpc.TRANSPORT,
        targets=[oslo_messaging.Target(topic='vmware_dvs')],
        endpoints=rule_endpoints,
        executor='eventlet')
    listener.start()
def get_agentlistner():
    """Lazily create and memoize the module-level agent listener."""
    global _AGENT_LISTENER
    if _AGENT_LISTENER:
        return _AGENT_LISTENER
    _AGENT_LISTENER = messaging.get_notification_listener(
        get_transport(),
        [messaging.Target(topic=cfg.CONF.agent.notifications)],
        [def_agent.DefaultAgentNotificationEndPoint()],
        executor='eventlet')
    return _AGENT_LISTENER
def event_handler(self):
    """Prepare the connection and block while serving bus events."""
    topic = self.get_notif_params().get('notification_topics')
    transport = messaging.get_transport(cfg.CONF)
    listener = messaging.get_notification_listener(
        transport,
        [messaging.Target(topic=topic)],
        [NotificationEndpoint(self)])
    listener.start()
    listener.wait()
def setUp(self):
    """Spin up a listener for every fixture topic and the control topic."""
    super(NotificationFixture, self).setUp()
    # self.name is the dedicated topic for internal control messages
    topic_names = list(self.topics) + [self.name]
    self.server = messaging.get_notification_listener(
        self.transport,
        [messaging.Target(topic=t) for t in topic_names],
        [self])
    self._ctrl = self.notifier('internal', topic=self.name)
    self._start()
def launch_monitor_listener(transport):
    """Start the test monitor listener and block until it finishes."""
    endpoint = def_monitor.DefaultMonitorNotificationEndPoint('Test-Monitor')
    target = messaging.Target(topic=cfg.CONF.monitor.notifications)
    listener = messaging.get_notification_listener(
        transport, [target], [endpoint])
    listener.start()
    time.sleep(1)  # brief pause so the listener is fully up before waiting
    listener.wait()
def get_monitorlistner():
    """Lazily build and cache the module-level monitor listener."""
    global _MONITOR_LISTENER
    if not _MONITOR_LISTENER:
        # endpoint label kept exactly as originally spelled
        endpoint = def_monitor.DefaultMonitorNotificationEndPoint(
            'defualt_monitor')
        _MONITOR_LISTENER = messaging.get_notification_listener(
            get_transport(),
            [messaging.Target(topic=cfg.CONF.monitor.notifications)],
            [endpoint],
            executor='eventlet')
    return _MONITOR_LISTENER
def test_no_target_topic(self):
    """Starting a listener whose target lacks a topic must fail."""
    transport = messaging.get_transport(self.conf, url='fake:')
    listener = messaging.get_notification_listener(
        transport, [messaging.Target()], [mock.Mock()])
    try:
        listener.start()
    except Exception as exc:
        self.assertIsInstance(exc, messaging.InvalidTarget, exc)
    else:
        self.assertTrue(False)
def main():
    """Parse config and serve the 'notifications' topic until stopped."""
    config.parse_args(sys.argv)
    logging.setup('quaker')
    messaging.set_transport_defaults('payload')
    transport = messaging.get_transport(cfg.CONF)
    listener = messaging.get_notification_listener(
        transport,
        [messaging.Target(topic='notifications')],
        [NotificationEndpoint()])
    listener.start()
    listener.wait()
def test_constructor(self):
    """The listener wires up conf, transport, dispatcher and executor."""
    transport = messaging.get_transport(self.conf, url='fake:')
    endpoints = [object()]
    listener = messaging.get_notification_listener(
        transport, [messaging.Target(topic='foo')], endpoints)
    self.assertIs(listener.conf, self.conf)
    self.assertIs(listener.transport, transport)
    self.assertIsInstance(listener.dispatcher,
                          dispatcher.NotificationDispatcher)
    self.assertIs(listener.dispatcher.endpoints, endpoints)
    # 'blocking' is the default executor
    self.assertEqual('blocking', listener.executor)
def _configure_pipeline_listeners(self):
    """Start one IPC listener per pipeline assigned to this agent.

    No-op unless workload partitioning is enabled.
    """
    if not cfg.CONF.notification.workload_partitioning:
        return
    my_pipes = self.partition_coordinator.extract_my_subset(
        self.group_id, self.pipeline_manager.pipelines)
    transport = messaging.get_transport()
    for pipe in my_pipes:
        LOG.debug(_('Pipeline endpoint: %s'), pipe.name)
        topic = '%s-%s' % (self.NOTIFICATION_IPC, pipe.name)
        listener = messaging.get_notification_listener(
            transport,
            [oslo.messaging.Target(topic=topic)],
            [pipeline.PipelineEndpoint(self.ctxt, pipe)])
        listener.start()
        self.pipeline_listeners.append(listener)
def _configure_pipeline_listeners(self):
    """Rebuild the per-pipeline IPC listeners for this agent's partition."""
    self.pipeline_listeners = []
    subset = self.partition_coordinator.extract_my_subset(
        self.group_id, self.pipeline_manager.pipelines)
    transport = messaging.get_transport()
    for pipe in subset:
        LOG.debug(_('Pipeline endpoint: %s'), pipe.name)
        target = oslo.messaging.Target(
            topic='%s-%s' % (self.NOTIFICATION_IPC, pipe.name))
        listener = messaging.get_notification_listener(
            transport, [target], [pipeline.PipelineEndpoint(self.ctxt, pipe)])
        listener.start()
        self.pipeline_listeners.append(listener)
def main():
    """Entry point: configure messaging and run the notification server."""
    config.parse_args(sys.argv)
    logging.setup('quaker')
    messaging.set_transport_defaults('payload')
    server = messaging.get_notification_listener(
        messaging.get_transport(cfg.CONF),
        [messaging.Target(topic='notifications')],
        [NotificationEndpoint()])
    server.start()
    server.wait()
def start(self):
    """Begin consuming glance notifications."""
    super(ListenerService, self).start()
    transport = messaging.get_transport(cfg.CONF)
    listener = messaging.get_notification_listener(
        transport,
        [messaging.Target(topic="notifications", exchange="glance")],
        [NotificationEndpoint()])
    listener.start()
    self.listeners.append(listener)
def start(self):
    """Subscribe to every (topic, exchange) pair the endpoint declares."""
    super(ListenerService, self).start()
    transport = messaging.get_transport(cfg.CONF)
    endpoint = NotificationEndpoint()
    targets = []
    for topic, exchange in endpoint.topics_and_exchanges():
        targets.append(messaging.Target(topic=topic, exchange=exchange))
    listener = messaging.get_notification_listener(
        transport, targets, [endpoint])
    listener.start()
    self.listeners.append(listener)
def _configure_main_queue_listeners(self, pipe_manager, event_pipe_manager):
    """Start listeners on the main notification queues.

    One listener is created per configured messaging URL; all of them share
    the same (deduplicated) target set and the same endpoint list.
    """
    notification_manager = self._get_notifications_manager(pipe_manager)
    if not list(notification_manager):
        LOG.warning(_('Failed to load any notification handlers for %s'),
                    self.NOTIFICATION_NAMESPACE)
    ack_on_error = cfg.CONF.notification.ack_on_event_error
    endpoints = []
    if cfg.CONF.notification.store_events:
        # event storage enabled: also dispatch raw events to the event pipeline
        endpoints.append(
            event_endpoint.EventsNotificationEndpoint(event_pipe_manager))
    targets = []
    for ext in notification_manager:
        handler = ext.obj
        if (cfg.CONF.notification.disable_non_metric_meters and
                isinstance(handler, base.NonMetricNotificationBase)):
            # operator opted out of non-metric meters; skip this handler
            continue
        LOG.debug(
            _('Event types from %(name)s: %(type)s'
              ' (ack_on_error=%(error)s)') % {
                'name': ext.name,
                'type': ', '.join(handler.event_types),
                'error': ack_on_error
            })
        # NOTE(gordc): this could be a set check but oslo.messaging issue
        # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
        # This ensures we don't create multiple duplicate consumers.
        for new_tar in handler.get_targets(cfg.CONF):
            if new_tar not in targets:
                targets.append(new_tar)
        endpoints.append(handler)
    urls = cfg.CONF.notification.messaging_urls or [None]
    for url in urls:
        transport = messaging.get_transport(url)
        listener = messaging.get_notification_listener(
            transport, targets, endpoints)
        listener.start()
        self.listeners.append(listener)
def _configure_pipeline_listeners(self):
    """Create IPC listeners for the sample/event pipelines owned here."""
    self.pipeline_listeners = []
    event_pipes = []
    if cfg.CONF.notification.store_events:
        event_pipes = self.event_pipeline_manager.pipelines
    mine = self.partition_coordinator.extract_my_subset(
        self.group_id, self.pipeline_manager.pipelines + event_pipes)
    transport = messaging.get_transport()
    for pipe in mine:
        LOG.debug(_('Pipeline endpoint: %s'), pipe.name)
        # pick the endpoint class matching the pipeline flavor
        if isinstance(pipe, pipeline.EventPipeline):
            endpoint_cls = pipeline.EventPipelineEndpoint
        else:
            endpoint_cls = pipeline.SamplePipelineEndpoint
        listener = messaging.get_notification_listener(
            transport,
            [oslo.messaging.Target(
                topic='%s-%s' % (self.NOTIFICATION_IPC, pipe.name))],
            [endpoint_cls(self.ctxt, pipe)])
        listener.start()
        self.pipeline_listeners.append(listener)
def _configure_pipeline_listeners(self):
    """(Re)create one IPC listener per pipeline assigned to this node."""
    self.pipeline_listeners = []
    extra = (self.event_pipeline_manager.pipelines
             if cfg.CONF.notification.store_events else [])
    assigned = self.partition_coordinator.extract_my_subset(
        self.group_id, self.pipeline_manager.pipelines + extra)
    transport = messaging.get_transport()
    for pipe in assigned:
        LOG.debug(_('Pipeline endpoint: %s'), pipe.name)
        endpoint_cls = (pipeline.EventPipelineEndpoint
                        if isinstance(pipe, pipeline.EventPipeline)
                        else pipeline.SamplePipelineEndpoint)
        topic = '%s-%s' % (self.NOTIFICATION_IPC, pipe.name)
        listener = messaging.get_notification_listener(
            transport,
            [oslo.messaging.Target(topic=topic)],
            [endpoint_cls(self.ctxt, pipe)])
        listener.start()
        self.pipeline_listeners.append(listener)
def start(self):
    """Listen on each (topic, exchange) pair published by the endpoint."""
    super(ListenerService, self).start()
    endpoint = NotificationEndpoint()
    targets = [messaging.Target(topic=t, exchange=x)
               for t, x in endpoint.topics_and_exchanges()]
    listener = messaging.get_notification_listener(
        messaging.get_transport(cfg.CONF), targets, [endpoint])
    listener.start()
    self.listeners.append(listener)
def _setup_listener(self, transport, endpoints, targets=None, pool=None):
    """Start a pooled listener in its own restartable thread."""
    key = '__default__' if pool is None else pool
    if targets is None:
        targets = [messaging.Target(topic='testtopic')]
    # one thread tracker per pool; the tracker also sees every message
    tracker = self.trackers.setdefault(key, self.ThreadTracker())
    listener = messaging.get_notification_listener(
        transport,
        targets=targets,
        endpoints=[tracker] + endpoints,
        allow_requeue=True,
        pool=pool)
    thread = RestartableServerThread(listener)
    tracker.start(thread)
    return thread
def start(self):
    """Load dispatchers, then bring up RPC and notification consumers."""
    self.dispatcher_manager = dispatcher.load_dispatcher_manager()
    self.notification_server = None
    super(ConsumerService, self).start()
    allow_requeue = cfg.CONF.consumer.requeue_on_error
    # transport is optional: without AMQP configured nothing is started
    transport = messaging.get_transport(optional=True)
    if not transport:
        return
    self.rpc_server = messaging.get_rpc_server(
        transport, cfg.CONF.consumer.topic, self)
    endpoint = ConsumerEndpoint(self.dispatcher_manager,
                                'record_metering_data',
                                cfg.CONF.consumer.priority)
    self.notification_server = messaging.get_notification_listener(
        transport,
        [oslo.messaging.Target(topic=cfg.CONF.consumer.topic)],
        [endpoint],
        allow_requeue=allow_requeue)
    self.notification_server.start()
def _configure_main_queue_listeners(self, pipe_manager, event_pipe_manager):
    """Wire every loaded notification handler onto the main queues.

    Builds a deduplicated target list across handlers, then starts one
    listener per configured messaging URL sharing those targets/endpoints.
    """
    notification_manager = self._get_notifications_manager(pipe_manager)
    if not list(notification_manager):
        LOG.warning(_('Failed to load any notification handlers for %s'),
                    self.NOTIFICATION_NAMESPACE)
    ack_on_error = cfg.CONF.notification.ack_on_event_error
    endpoints = []
    if cfg.CONF.notification.store_events:
        # event storage enabled: raw events also go to the event pipeline
        endpoints.append(
            event_endpoint.EventsNotificationEndpoint(event_pipe_manager))
    targets = []
    for ext in notification_manager:
        handler = ext.obj
        if (cfg.CONF.notification.disable_non_metric_meters and
                isinstance(handler, base.NonMetricNotificationBase)):
            # non-metric meters disabled by config; skip this handler
            continue
        LOG.debug(_('Event types from %(name)s: %(type)s'
                    ' (ack_on_error=%(error)s)') %
                  {'name': ext.name,
                   'type': ', '.join(handler.event_types),
                   'error': ack_on_error})
        # NOTE(gordc): this could be a set check but oslo.messaging issue
        # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
        # This ensures we don't create multiple duplicate consumers.
        for new_tar in handler.get_targets(cfg.CONF):
            if new_tar not in targets:
                targets.append(new_tar)
        endpoints.append(handler)
    urls = cfg.CONF.notification.messaging_urls or [None]
    for url in urls:
        transport = messaging.get_transport(url)
        listener = messaging.get_notification_listener(
            transport, targets, endpoints)
        listener.start()
        self.listeners.append(listener)
def start(self):
    """Bring up the consumer's RPC server and notification listener."""
    self.dispatcher_manager = dispatcher.load_dispatcher_manager()
    self.notification_server = None
    super(ConsumerService, self).start()
    requeue = cfg.CONF.consumer.requeue_on_error
    transport = messaging.get_transport(optional=True)
    if transport:
        topic = cfg.CONF.consumer.topic
        self.rpc_server = messaging.get_rpc_server(transport, topic, self)
        consumer = ConsumerEndpoint(self.dispatcher_manager,
                                    'record_metering_data',
                                    cfg.CONF.consumer.priority)
        self.notification_server = messaging.get_notification_listener(
            transport,
            [oslo.messaging.Target(topic=topic)],
            [consumer],
            allow_requeue=requeue)
        self.notification_server.start()
def main(argv=sys.argv[1:]):
    """Consume nova notifications on the 'brcd' topic until interrupted.

    Returns 130 on Ctrl-C, 1 on any other error.
    """
    try:
        LOG.info('configuring connection')
        transport_url = CONFIG.get("DEFAULT", "transport_url")
        transport = messaging.get_transport(cfg.CONF, transport_url)
        listener = messaging.get_notification_listener(
            transport,
            [messaging.Target(topic='brcd', exchange='nova')],
            [NotificationHandler()],
            allow_requeue=True,
            executor='eventlet')
        LOG.info('starting up server')
        listener.start()
        LOG.info('waiting for nova events/notifications')
        listener.wait()
    except KeyboardInterrupt:
        print("... exiting brocade nova listener")
        return 130
    except Exception as e:
        print(e)
        return 1
# root-logger setup: log to stderr at INFO level
logging.basicConfig()
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)


class NotificationHandler(object):
    """Endpoint invoked by oslo.messaging per notification priority."""

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        # Only acknowledge messages from the 'testing' publisher; other
        # publishers fall through (implicit None return).
        if publisher_id == 'testing':
            log.info('Handled')
            return messaging.NotificationResult.HANDLED

    def warn(self, ctxt, publisher_id, event_type, payload, metadata):
        log.info('WARN')

    def error(self, ctxt, publisher_id, event_type, payload, metadata):
        log.info('ERROR')


log.info('Configuring connection')
# NOTE(review): credentials are masked in this snippet — substitute real
# ones before running.
transport_url = 'rabbit://*****:*****@127.0.0.1:5672/'
transport = messaging.get_transport(cfg.CONF, transport_url)
targets = [messaging.Target(topic='monitor')]
endpoints = [NotificationHandler()]
server = messaging.get_notification_listener(transport, targets, endpoints,
                                             allow_requeue=True,
                                             executor='eventlet')
log.info('Starting up server')
server.start()
log.info('Waiting for something')
server.wait()
class NotificationHandler(object):
    """Notification endpoint that logs each priority it receives."""

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        # acknowledge only the 'testing' publisher; others get default handling
        if publisher_id != 'testing':
            return
        log.info('Handled')
        return messaging.NotificationResult.HANDLED

    def warn(self, ctxt, publisher_id, event_type, payload, metadata):
        log.info('WARN')

    def error(self, ctxt, publisher_id, event_type, payload, metadata):
        log.info('ERROR')


log.info('Configuring connection')
transport_url = 'rabbit://*****:*****@127.0.0.1:5672/'
transport = messaging.get_transport(cfg.CONF, transport_url)
targets = [messaging.Target(topic='monitor')]
endpoints = [NotificationHandler()]
server = messaging.get_notification_listener(transport,
                                             targets,
                                             endpoints,
                                             allow_requeue=True,
                                             executor='eventlet')
log.info('Starting up server')
server.start()
log.info('Waiting for something')
server.wait()
def __init__(self, transport, targets, endpoints, expect_messages):
    """Build a requeue-enabled listener with self prepended to the endpoints."""
    self._expect_messages = expect_messages
    self._received_msgs = 0
    # 'self' goes first so this object observes every incoming message
    self._listener = messaging.get_notification_listener(
        transport, targets, [self] + endpoints, allow_requeue=True)
def main(argv):
    """Benchmark entry point: flood a notification listener with messages.

    Python 2 script. Parses CLI options, starts one receiver thread plus
    ``sthreads`` sender threads, and prints throughput once all messages
    (``msg_num`` per sender) have been serviced.
    """
    # defaults, overridable from the command line
    size = 10**6          # payload size in characters
    msg_num = 100         # messages per sender thread
    sthreads = 1          # number of concurrent sender threads
    rabbit_username = ''
    rabbit_password = ''
    rabbit_host_srv = ''
    rabbit_host_client = ''
    try:
        opts, args = getopt.getopt(argv, "hr:s:u:p:z:n:t:",
                                   ["--receiver", "--sender", "--password",
                                    "--user", "--size", "--num", "--sthreads"])
    except getopt.GetoptError:
        help()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            help()
            sys.exit()
        elif opt in ("-z", "--size"):
            size = int(arg)
        elif opt in ("-n", "--num"):
            msg_num = int(arg)
        elif opt in ("-r", "--receiver"):
            rabbit_host_srv = arg
        elif opt in ("-s", "--sender"):
            rabbit_host_client = arg
        elif opt in ("-u", "--user"):
            rabbit_username = arg
        elif opt in ("-p", "--password"):
            rabbit_password = arg
        elif opt in ("-t", "--sthreads"):
            sthreads = int(arg)
    # all four connection options are mandatory
    if (rabbit_host_srv == '' or rabbit_host_client == '' or
            rabbit_username == '' or rabbit_password == ''):
        help()
        print '--sender, --receiver, --password and --user are mandatory options'
        sys.exit(2)
    msg = "t" * size
    log.info('Configuring connection')
    # separate transports: one for the consuming server, one for the producer
    transport_url_server = 'rabbit://'+rabbit_username+':'+rabbit_password+'@'+rabbit_host_srv+'/'
    transport_server = messaging.get_transport(cfg.CONF, transport_url_server)
    targets = [messaging.Target(topic='heavy_load')]
    endpoints = [NotificationHandler()]
    server = messaging.get_notification_listener(transport_server, targets,
                                                 endpoints,
                                                 allow_requeue=True,
                                                 executor='eventlet')
    transport_url_client = 'rabbit://'+rabbit_username+':'+rabbit_password+'@'+rabbit_host_client+'/'
    transport_client = messaging.get_transport(cfg.CONF, transport_url_client)
    notifier = messaging.Notifier(transport_client, driver='messaging',
                                  publisher_id='testing', topic='heavy_load')

    def send():
        # each sender publishes msg_num large payloads
        for x in range(0, msg_num):
            notifier.info({'some': 'context'}, 'just.testing', {'heavy': msg})

    def receive():
        server.start()

    def watch():
        # NOTE(review): 'counter' appears to be a global maintained by
        # NotificationHandler elsewhere in this file — confirm.
        while counter < msg_num*sthreads:
            time.sleep(1)
        server.stop()
        server.wait()
        print 'Serviced ', counter, ' messages in', time.clock() - time_start, 'seconds'

    try:
        receiver = threading.Thread(name='Receiver', target=receive)
        watcher = threading.Thread(name='Watcher', target=watch)
        receiver.start()
        time_start = time.clock()
        sender_threads = []
        for i in range(sthreads):
            t = threading.Thread(target=send)
            sender_threads.append(t)
            t.start()
        watcher.start()
        watcher.join()
    except:
        print 'Unable to start threads'
        # NOTE(review): this chunk begins mid-method — the two statements
        # below belong to a handler whose 'def' line is outside this view.
        pprint.pprint(payload)
        sys.stdout.flush()

    def sample(self, ctxt, publisher_id, event_type, payload, metadata):
        # 'sample' priority handler: dump the payload to stdout
        print "sample"
        pprint.pprint(payload)
        sys.stdout.flush()


# map legacy designate RPC driver paths to oslo.messaging transport names
TRANSPORT_ALIASES = {
    'designate.openstack.common.rpc.impl_kombu': 'rabbit',
    'designate.openstack.common.rpc.impl_qpid': 'qpid',
    'designate.openstack.common.rpc.impl_zmq': 'zmq',
    'designate.rpc.impl_kombu': 'rabbit',
    'designate.rpc.impl_qpid': 'qpid',
    'designate.rpc.impl_zmq': 'zmq',
}

utils.read_config('designate', sys.argv)
logging.setup('designate')
transport = messaging.get_transport(cfg.CONF, aliases=TRANSPORT_ALIASES)
# listen for nova and neutron notifications on their own exchanges
targets = [
    messaging.Target(exchange='nova', topic='notifications'),
    messaging.Target(exchange='neutron', topic='notifications')
]
endpoints = [NotificationEndpoint()]
server = messaging.get_notification_listener(transport, targets, endpoints)
server.start()
server.wait()
        # tail of the 'allowed_priorities' class attribute — its opening
        # bracket is outside this view
        'warning', 'error', 'audit', 'debug'
    ]

    def __init__(self):
        super(EntryEndpoint, self).__init__()

        def _notify(ctxt, publisher_id, event_type, payload, metadata):
            # forward every notification to the consolidator
            _cons.consolidate(ctxt, event_type, payload)

        # set method for each priority
        for p in self.allowed_priorities:
            # NOTE(review): setattr stores a plain function on the instance,
            # so calls through the instance do NOT receive 'self'; the
            # signature above matches oslo.messaging dispatch — confirm.
            setattr(self, p, _notify)


if __name__ == '__main__':
    from sim import config
    config.init_conf()
    transport = messaging.get_transport(cfg.CONF)
    targets = [messaging.Target(topic='notifications'), ]
    endpoints = [EntryEndpoint()]
    server = messaging.get_notification_listener(transport, targets, endpoints)
    # the default executor is blocking!
    # so every request is executed as in a queue!
    # even in openstack they do this:
    # https://github.com/openstack/ceilometer/blob/master/ceilometer/notification.py#L104
    server.start()
    server.wait()