Example #1
    def run(self):
        # Delay startup so workers are jittered
        time.sleep(self.startup_delay)

        super(NotificationService, self).run()

        self.managers = [ext.obj for ext in named.NamedExtensionManager(
            namespace='ceilometer.notification.pipeline',
            names=self.conf.notification.pipelines, invoke_on_load=True,
            on_missing_entrypoints_callback=self._log_missing_pipeline,
            invoke_args=(self.conf,))]

        # FIXME(sileht): endpoint uses the notification_topics option
        # and it should not because this is an oslo_messaging option
        # not a ceilometer one. Until we have something to get the
        # notification_topics in another way, we must create a transport
        # to ensure the option has been registered by oslo_messaging.
        messaging.get_notifier(messaging.get_transport(self.conf), '')

        endpoints = []
        for pipe_mgr in self.managers:
            endpoints.extend(pipe_mgr.get_main_endpoints())
        targets = self.get_targets()

        urls = self.conf.notification.messaging_urls or [None]
        for url in urls:
            transport = messaging.get_transport(self.conf, url)
            # NOTE(gordc): ignore batching as we want pull
            # to maintain sequencing as much as possible.
            listener = messaging.get_batch_notification_listener(
                transport, targets, endpoints, allow_requeue=True)
            listener.start(
                override_pool_size=self.conf.max_parallel_requests
            )
            self.listeners.append(listener)
Example #2
    def start(self):
        super(NotificationService, self).start()
        # FIXME(sileht): endpoint uses the notification_topics option
        # and it should not because this is an oslo.messaging option
        # not a ceilometer one. Until we have something to get the
        # notification_topics in another way, we must create a transport
        # to ensure the option has been registered by oslo.messaging.
        transport = messaging.get_transport()
        messaging.get_notifier(transport, '')

        self.pipeline_manager = pipeline.setup_pipeline()

        self.notification_manager = self._get_notifications_manager(
            self.pipeline_manager)
        if not list(self.notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints = [event_endpoint.EventsNotificationEndpoint()]

        targets = []
        for ext in self.notification_manager:
            handler = ext.obj
            LOG.debug(_('Event types from %(name)s: %(type)s'
                        ' (ack_on_error=%(error)s)') %
                      {'name': ext.name,
                       'type': ', '.join(handler.event_types),
                       'error': ack_on_error})
            # NOTE(gordc): this could be a set check but oslo.messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(cfg.CONF):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        self.listeners = []
        for url in urls:
            transport = messaging.get_transport(url)
            listener = messaging.get_notification_listener(
                transport, targets, endpoints)
            listener.start()
            self.listeners.append(listener)

        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #3
    def start(self):
        super(NotificationService, self).start()
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): the pipeline queues use a single amqp host,
        # hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)
            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)
            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listener()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(_LW('Non-metric meters may be collected. It is highly '
                            'advisable to disable these meters using '
                            'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

        self.init_pipeline_refresh()
Example #4
    def __init__(self):
        super(PartitionedAlarmService, self).__init__()
        transport = messaging.get_transport()
        self.rpc_server = messaging.get_rpc_server(
            transport, cfg.CONF.alarm.partition_rpc_topic, self)

        self.partition_coordinator = alarm_coordination.PartitionCoordinator()
Example #5
 def __init__(self):
     super(AlarmNotifierService, self).__init__()
     transport = messaging.get_transport()
     self.rpc_server = messaging.get_rpc_server(
         transport, cfg.CONF.alarm.notifier_rpc_topic, self)
     self.notifiers = extension.ExtensionManager(self.EXTENSIONS_NAMESPACE,
                                                 invoke_on_load=True)
Example #6
 def _configure_pipeline_listeners(self):
     self.pipeline_listeners = []
     ev_pipes = []
     if cfg.CONF.notification.store_events:
         ev_pipes = self.event_pipeline_manager.pipelines
     pipelines = self.pipeline_manager.pipelines + ev_pipes
     transport = messaging.get_transport()
     partitioned = self.partition_coordinator.extract_my_subset(
         self.group_id,
         range(cfg.CONF.notification.pipeline_processing_queues))
     for pipe_set in partitioned:
         for pipe in pipelines:
             LOG.debug('Pipeline endpoint: %s from set: %s', pipe.name,
                       pipe_set)
             pipe_endpoint = (pipeline.EventPipelineEndpoint
                              if isinstance(pipe, pipeline.EventPipeline)
                              else pipeline.SamplePipelineEndpoint)
             listener = messaging.get_notification_listener(
                 transport,
                 [oslo_messaging.Target(
                     topic='%s-%s-%s' % (self.NOTIFICATION_IPC,
                                         pipe.name, pipe_set))],
                 [pipe_endpoint(self.ctxt, pipe)])
             listener.start()
             self.pipeline_listeners.append(listener)
Example #7
def get_notifier(conf):
    return oslo_messaging.Notifier(
        messaging.get_transport(conf),
        driver='messagingv2',
        publisher_id='telemetry.publisher.test',
        topics=['metering'],
    )
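A minimal usage sketch for the notifier built above; the event type and payload are illustrative assumptions, not part of the original snippet (oslo_messaging.Notifier exposes a sample() priority used for metering data):

    # Hypothetical caller emitting one metering sample through the notifier.
    notifier = get_notifier(conf)
    notifier.sample({}, event_type='metering',
                    payload=[{'counter_name': 'cpu', 'counter_volume': 1.0}])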
Example #8
    def run(self):
        if self.conf.collector.udp_address:
            self.udp_thread = utils.spawn_thread(self.start_udp)

        transport = messaging.get_transport(self.conf, optional=True)
        if transport:
            if list(self.meter_manager):
                sample_target = oslo_messaging.Target(
                    topic=self.conf.publisher_notifier.metering_topic)
                self.sample_listener = (
                    messaging.get_batch_notification_listener(
                        transport, [sample_target],
                        [SampleEndpoint(self.conf.publisher.telemetry_secret,
                                        self.meter_manager)],
                        allow_requeue=True,
                        batch_size=self.conf.collector.batch_size,
                        batch_timeout=self.conf.collector.batch_timeout))
                self.sample_listener.start()

            if list(self.event_manager):
                event_target = oslo_messaging.Target(
                    topic=self.conf.publisher_notifier.event_topic)
                self.event_listener = (
                    messaging.get_batch_notification_listener(
                        transport, [event_target],
                        [EventEndpoint(self.conf.publisher.telemetry_secret,
                                       self.event_manager)],
                        allow_requeue=True,
                        batch_size=self.conf.collector.batch_size,
                        batch_timeout=self.conf.collector.batch_timeout))
                self.event_listener.start()
Example #9
    def _configure_main_queue_listeners(self, pipe_manager, event_pipe_manager):
        notification_manager = self._get_notifications_manager(pipe_manager)
        if not list(notification_manager):
            LOG.warning(_("Failed to load any notification handlers for %s"), self.NOTIFICATION_NAMESPACE)

        ack_on_error = self.conf.notification.ack_on_event_error

        endpoints = []
        endpoints.append(event_endpoint.EventsNotificationEndpoint(event_pipe_manager))

        targets = []
        for ext in notification_manager:
            handler = ext.obj
            if self.conf.notification.disable_non_metric_meters and isinstance(handler, base.NonMetricNotificationBase):
                continue
            LOG.debug(
                "Event types from %(name)s: %(type)s" " (ack_on_error=%(error)s)",
                {"name": ext.name, "type": ", ".join(handler.event_types), "error": ack_on_error},
            )
            # NOTE(gordc): this could be a set check but oslo_messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(self.conf):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)

        urls = self.conf.notification.messaging_urls or [None]
        for url in urls:
            transport = messaging.get_transport(self.conf, url)
            # NOTE(gordc): ignore batching as we want pull
            # to maintain sequencing as much as possible.
            listener = messaging.get_batch_notification_listener(transport, targets, endpoints)
            listener.start()
            self.listeners.append(listener)
Example #10
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        transport = messaging.get_transport()
        self.partition_coordinator = coordination.PartitionCoordinator()
        self.partition_coordinator.start()

        event_pipe_manager = None
        if cfg.CONF.notification.workload_partitioning:
            pipe_manager = pipeline.SamplePipelineTransportManager()
            for pipe in self.pipeline_manager.pipelines:
                pipe_manager.add_transporter(
                    (pipe.source.support_meter,
                     self._get_notifier(transport, pipe)))
            if cfg.CONF.notification.store_events:
                event_pipe_manager = pipeline.EventPipelineTransportManager()
                for pipe in self.event_pipeline_manager.pipelines:
                    event_pipe_manager.add_transporter(
                        (pipe.source.support_event,
                         self._get_notifier(transport, pipe)))

            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(transport, '')
            pipe_manager = self.pipeline_manager
            if cfg.CONF.notification.store_events:
                event_pipe_manager = self.event_pipeline_manager
            self.group_id = None

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            self.partition_coordinator.join_group(self.group_id)
            self._configure_pipeline_listeners()
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(_LW('Non-metric meters may be collected. It is highly '
                            'advisable to disable these meters using '
                            'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #11
def get_notifier(config_file):
    service.prepare_service(argv=['/', '--config-file', config_file])
    return oslo_messaging.Notifier(
        messaging.get_transport(),
        driver='messagingv2',
        publisher_id='telemetry.publisher.test',
        topic='metering',
    )
Example #12
def _send_notification(event, payload):
    notification = event.replace(" ", "_")
    notification = "alarm.%s" % notification
    transport = messaging.get_transport()
    notifier = messaging.get_notifier(transport, publisher_id="ceilometer.api")
    # FIXME(sileht): perhaps we need to copy some info from the
    # pecan request headers like nova does
    notifier.info(context.RequestContext(), notification, payload)
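A hedged usage sketch of the helper above; the event name and payload keys are made up for illustration (the helper turns 'state transition' into the event type 'alarm.state_transition'):

    # Hypothetical call site for _send_notification().
    _send_notification('state transition',
                       {'alarm_id': 'example-alarm-id', 'detail': 'ok -> alarm'})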
Example #13
    def __init__(self):
        super(PartitionedAlarmService, self).__init__()
        transport = messaging.get_transport()
        self.rpc_server = messaging.get_rpc_server(transport, cfg.CONF.alarm.partition_rpc_topic, self)

        self._load_evaluators()
        self.api_client = None
        self.partition_coordinator = coordination.PartitionCoordinator()
Example #14
    def __init__(self, namespaces=None, pollster_list=None):
        namespaces = namespaces or ['compute', 'central']
        pollster_list = pollster_list or []
        group_prefix = cfg.CONF.polling.partitioning_group_prefix

        # the coordination and pollster-list features are mutually exclusive
        # and cannot be used at the same time, in order to avoid both sample
        # duplication and sample loss
        if pollster_list and cfg.CONF.coordination.backend_url:
            raise PollsterListForbidden()

        super(AgentManager, self).__init__()

        def _match(pollster):
            """Find out if pollster name matches to one of the list."""
            return any(utils.match(pollster.name, pattern) for
                       pattern in pollster_list)

        if type(namespaces) is not list:
            namespaces = [namespaces]

        # we'll have the default ['compute', 'central'] here if no namespaces
        # are passed
        extensions = (self._extensions('poll', namespace).extensions
                      for namespace in namespaces)
        # get the extensions from pollster builder
        extensions_fb = (self._extensions_from_builder('poll', namespace)
                         for namespace in namespaces)
        if pollster_list:
            extensions = (moves.filter(_match, exts)
                          for exts in extensions)
            extensions_fb = (moves.filter(_match, exts)
                             for exts in extensions_fb)

        self.extensions = list(itertools.chain(*list(extensions))) + list(
            itertools.chain(*list(extensions_fb)))

        if self.extensions == []:
            raise EmptyPollstersList()

        self.discovery_manager = self._extensions('discover')
        self.context = context.RequestContext('admin', 'admin', is_admin=True)
        self.partition_coordinator = coordination.PartitionCoordinator()

        # Compose coordination group prefix.
        # We'll use namespaces as the basis for this partitioning.
        namespace_prefix = '-'.join(sorted(namespaces))
        self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
                             if group_prefix else namespace_prefix)

        self.notifier = oslo_messaging.Notifier(
            messaging.get_transport(),
            driver=cfg.CONF.publisher_notifier.telemetry_driver,
            publisher_id="ceilometer.polling")

        self._keystone = None
        self._keystone_last_exception = None
Example #15
    def __init__(self, parsed_url):
        super(RPCPublisher, self).__init__(parsed_url)

        options = urlparse.parse_qs(parsed_url.query)
        self.target = options.get('target', ['record_metering_data'])[0]

        self.rpc_client = messaging.get_rpc_client(
            messaging.get_transport(),
            retry=self.retry, version='1.0'
        )
Example #16
    def setup_messaging(self, conf, exchange=None):
        self.useFixture(oslo_messaging.conffixture.ConfFixture(conf))
        conf.set_override("notification_driver", "messaging")
        if not exchange:
            exchange = "ceilometer"
        conf.set_override("control_exchange", exchange)

        # NOTE(sileht): Ensure a new oslo.messaging driver is loaded
        # between each test
        self.transport = messaging.get_transport("fake://", cache=False)
        self.useFixture(mockpatch.Patch("ceilometer.messaging.get_transport", return_value=self.transport))
Example #17
 def __init__(self, parsed_url, default_topic):
     super(NotifierPublisher, self).__init__(parsed_url)
     options = urlparse.parse_qs(parsed_url.query)
     topic = options.get('topic', [default_topic])[-1]
     self.notifier = oslo_messaging.Notifier(
         messaging.get_transport(),
         driver=cfg.CONF.publisher_notifier.telemetry_driver,
         publisher_id='telemetry.publisher.%s' % cfg.CONF.host,
         topic=topic,
         retry=self.retry
     )
Example #18
    def __init__(self, worker_id, conf, namespaces=None):
        namespaces = namespaces or ['compute', 'central']
        group_prefix = conf.polling.partitioning_group_prefix

        super(AgentManager, self).__init__(worker_id)

        self.conf = conf

        if type(namespaces) is not list:
            namespaces = [namespaces]

        # we'll have the default ['compute', 'central'] here if no namespaces
        # are passed
        extensions = (self._extensions('poll', namespace, self.conf).extensions
                      for namespace in namespaces)
        # get the extensions from pollster builder
        extensions_fb = (self._extensions_from_builder('poll', namespace)
                         for namespace in namespaces)

        self.extensions = list(itertools.chain(*list(extensions))) + list(
            itertools.chain(*list(extensions_fb)))

        if not self.extensions:
            LOG.warning('No valid pollsters can be loaded from %s '
                        'namespaces', namespaces)

        discoveries = (self._extensions('discover', namespace,
                                        self.conf).extensions
                       for namespace in namespaces)
        self.discoveries = list(itertools.chain(*list(discoveries)))
        self.polling_periodics = None

        self.hashrings = None
        self.partition_coordinator = None
        if self.conf.coordination.backend_url:
            # XXX uuid4().bytes ought to work, but it requires ascii for now
            coordination_id = str(uuid.uuid4()).encode('ascii')
            self.partition_coordinator = coordination.get_coordinator(
                self.conf.coordination.backend_url, coordination_id)

        # Compose coordination group prefix.
        # We'll use namespaces as the basis for this partitioning.
        namespace_prefix = '-'.join(sorted(namespaces))
        self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
                             if group_prefix else namespace_prefix)

        self.notifier = oslo_messaging.Notifier(
            messaging.get_transport(self.conf),
            driver=self.conf.publisher_notifier.telemetry_driver,
            publisher_id="ceilometer.polling")

        self._keystone = None
        self._keystone_last_exception = None
Example #19
    def setup_messaging(self, conf, exchange=None):
        self.useFixture(oslo_messaging.conffixture.ConfFixture(conf))
        conf.set_override("notification_driver", "messaging")
        if not exchange:
            exchange = "ceilometer"
        conf.set_override("control_exchange", exchange)

        # oslo.messaging fake driver needs time and thread
        # to be patched, otherwise deadlocks can occur
        eventlet.monkey_patch(time=True, thread=True)

        # NOTE(sileht): Ensure a new oslo.messaging driver is loaded
        # between each test
        self.transport = messaging.get_transport("fake://", cache=False)
        self.useFixture(mockpatch.Patch("ceilometer.messaging.get_transport", return_value=self.transport))
Example #20
    def start(self):
        """Bind the UDP socket and handle incoming data."""
        # ensure dispatcher is configured before starting other services
        dispatcher_managers = dispatcher.load_dispatcher_manager()
        (self.meter_manager, self.event_manager) = dispatcher_managers
        self.rpc_server = None
        self.sample_listener = None
        self.event_listener = None
        super(CollectorService, self).start()

        if cfg.CONF.collector.udp_address:
            self.tg.add_thread(self.start_udp)

        transport = messaging.get_transport(optional=True)
        if transport:
            if cfg.CONF.collector.enable_rpc:
                LOG.warning('RPC collector is deprecated in favour of queues. '
                            'Please switch to notifier publisher.')
                self.rpc_server = messaging.get_rpc_server(
                    transport, cfg.CONF.publisher_rpc.metering_topic, self)

            if list(self.meter_manager):
                sample_target = oslo_messaging.Target(
                    topic=cfg.CONF.publisher_notifier.metering_topic)
                self.sample_listener = messaging.get_notification_listener(
                    transport, [sample_target],
                    [SampleEndpoint(self.meter_manager)],
                    allow_requeue=(cfg.CONF.collector.
                                   requeue_sample_on_dispatcher_error))
                self.sample_listener.start()

            if cfg.CONF.notification.store_events and list(self.event_manager):
                event_target = oslo_messaging.Target(
                    topic=cfg.CONF.publisher_notifier.event_topic)
                self.event_listener = messaging.get_notification_listener(
                    transport, [event_target],
                    [EventEndpoint(self.event_manager)],
                    allow_requeue=(cfg.CONF.collector.
                                   requeue_event_on_dispatcher_error))
                self.event_listener.start()

            if cfg.CONF.collector.enable_rpc:
                self.rpc_server.start()

            if not cfg.CONF.collector.udp_address:
                # Add a dummy thread to have wait() working
                self.tg.add_timer(604800, lambda: None)
Example #21
    def start(self):
        """Bind the UDP socket and handle incoming data."""
        # ensure dispatcher is configured before starting other services
        dispatcher_managers = dispatcher.load_dispatcher_manager()
        (self.meter_manager, self.event_manager) = dispatcher_managers
        self.sample_listener = None
        self.event_listener = None
        self.udp_thread = None
        super(CollectorService, self).start()

        if cfg.CONF.collector.udp_address:
            self.udp_thread = threading.Thread(target=self.start_udp)
            self.udp_thread.daemon = True
            self.udp_thread.start()

        transport = messaging.get_transport(optional=True)
        if transport:
            if list(self.meter_manager):
                sample_target = oslo_messaging.Target(
                    topic=cfg.CONF.publisher_notifier.metering_topic)
                self.sample_listener = (
                    messaging.get_batch_notification_listener(
                        transport, [sample_target],
                        [SampleEndpoint(self.meter_manager)],
                        allow_requeue=True,
                        batch_size=cfg.CONF.collector.batch_size,
                        batch_timeout=cfg.CONF.collector.batch_timeout))
                self.sample_listener.start()

            if cfg.CONF.notification.store_events and list(self.event_manager):
                event_target = oslo_messaging.Target(
                    topic=cfg.CONF.publisher_notifier.event_topic)
                self.event_listener = (
                    messaging.get_batch_notification_listener(
                        transport, [event_target],
                        [EventEndpoint(self.event_manager)],
                        allow_requeue=True,
                        batch_size=cfg.CONF.collector.batch_size,
                        batch_timeout=cfg.CONF.collector.batch_timeout))
                self.event_listener.start()

            if not cfg.CONF.collector.udp_address:
                # NOTE(sileht): We have to drop oslo.service to remove this
                # last eventlet thread
                # Add a dummy thread to have wait() working
                self.tg.add_timer(604800, lambda: None)
Example #22
    def _configure_pipeline_listeners(self, reuse_listeners=False):
        with self.coord_lock:
            ev_pipes = []
            if cfg.CONF.notification.store_events:
                ev_pipes = self.event_pipeline_manager.pipelines
            pipelines = self.pipeline_manager.pipelines + ev_pipes
            transport = messaging.get_transport()
            partitioned = self.partition_coordinator.extract_my_subset(
                self.group_id,
                range(cfg.CONF.notification.pipeline_processing_queues))

            queue_set = {}
            for pipe_set, pipe in itertools.product(partitioned, pipelines):
                queue_set['%s-%s-%s' %
                          (self.NOTIFICATION_IPC, pipe.name, pipe_set)] = pipe

            if reuse_listeners:
                topics = queue_set.keys()
                kill_list = []
                for listener in self.pipeline_listeners:
                    if listener.dispatcher.targets[0].topic in topics:
                        queue_set.pop(listener.dispatcher.targets[0].topic)
                    else:
                        kill_list.append(listener)
                for listener in kill_list:
                    utils.kill_listeners([listener])
                    self.pipeline_listeners.remove(listener)
            else:
                utils.kill_listeners(self.pipeline_listeners)
                self.pipeline_listeners = []

            for topic, pipe in queue_set.items():
                LOG.debug('Pipeline endpoint: %s from queue: %s', pipe.name,
                          topic)
                pipe_endpoint = (pipeline.EventPipelineEndpoint
                                 if isinstance(pipe, pipeline.EventPipeline)
                                 else pipeline.SamplePipelineEndpoint)
                listener = messaging.get_batch_notification_listener(
                    transport,
                    [oslo_messaging.Target(topic=topic)],
                    [pipe_endpoint(self.ctxt, pipe)],
                    batch_size=cfg.CONF.notification.batch_size,
                    batch_timeout=cfg.CONF.notification.batch_timeout)
                listener.start()
                self.pipeline_listeners.append(listener)
Example #23
    def setup_messaging(self, conf, exchange=None):
        self.useFixture(oslo.messaging.conffixture.ConfFixture(conf))
        conf.set_override("notification_driver", "messaging")
        if not exchange:
            exchange = 'ceilometer'
        conf.set_override("control_exchange", exchange)

        # NOTE(sileht): oslo.messaging fake driver uses time.sleep
        # for task switching, so we need to monkey_patch it
        # and also ensure the correct exchange has been set
        eventlet.monkey_patch(time=True)

        # NOTE(sileht): Ensure a new oslo.messaging driver is loaded
        # between each test
        self.transport = messaging.get_transport("fake://", cache=False)
        self.useFixture(mockpatch.Patch(
            'ceilometer.messaging.get_transport',
            return_value=self.transport))
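As a usage sketch, such a fixture is normally invoked from a test's setUp(); the test class name and the origin of self.CONF are assumptions here, not part of the snippet above:

    # Hypothetical test case applying the fixture above in setUp().
    def setUp(self):
        super(TestNotificationService, self).setUp()
        self.setup_messaging(self.CONF)
        # self.transport now points at the uncached fake:// transport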
Example #24
 def __init__(self, parsed_url, default_topic):
     super(NotifierPublisher, self).__init__(parsed_url)
     options = urlparse.parse_qs(parsed_url.query)
     topic = options.pop('topic', [default_topic])
     driver = options.pop('driver', ['rabbit'])[0]
     url = None
     if parsed_url.netloc != '':
         url = urlparse.urlunsplit([driver, parsed_url.netloc,
                                    parsed_url.path,
                                    urlparse.urlencode(options, True),
                                    parsed_url.fragment])
     self.notifier = oslo_messaging.Notifier(
         messaging.get_transport(url),
         driver=cfg.CONF.publisher_notifier.telemetry_driver,
         publisher_id='telemetry.publisher.%s' % cfg.CONF.host,
         topics=topic,
         retry=self.retry
     )
Example #25
    def start(self):
        """Bind the UDP socket and handle incoming data."""
        # ensure dispatcher is configured before starting other services
        self.dispatcher_manager = dispatcher.load_dispatcher_manager()
        self.rpc_server = None
        super(CollectorService, self).start()

        if cfg.CONF.collector.udp_address:
            self.tg.add_thread(self.start_udp)

        transport = messaging.get_transport(optional=True)
        if transport:
            self.rpc_server = messaging.get_rpc_server(
                transport, cfg.CONF.publisher_rpc.metering_topic, self)
            self.rpc_server.start()

            if not cfg.CONF.collector.udp_address:
                # Add a dummy thread to have wait() working
                self.tg.add_timer(604800, lambda: None)
Example #26
    def _configure_main_queue_listeners(self, pipe_manager,
                                        event_pipe_manager):
        notification_manager = self._get_notifications_manager(pipe_manager)
        if not list(notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints.append(
                event_endpoint.EventsNotificationEndpoint(event_pipe_manager))

        targets = []
        for ext in notification_manager:
            handler = ext.obj
            if (cfg.CONF.notification.disable_non_metric_meters and
                    isinstance(handler, base.NonMetricNotificationBase)):
                continue
            LOG.debug('Event types from %(name)s: %(type)s'
                      ' (ack_on_error=%(error)s)',
                      {'name': ext.name,
                       'type': ', '.join(handler.event_types),
                       'error': ack_on_error})
            # NOTE(gordc): this could be a set check but oslo_messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(cfg.CONF):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        for url in urls:
            transport = messaging.get_transport(url)
            listener = messaging.get_batch_notification_listener(
                transport, targets, endpoints,
                batch_size=cfg.CONF.notification.batch_size,
                batch_timeout=cfg.CONF.notification.batch_timeout)
            listener.start()
            self.listeners.append(listener)
Example #27
    def _configure_pipeline_listener(self):
        with self.coord_lock:
            ev_pipes = []
            if cfg.CONF.notification.store_events:
                ev_pipes = self.event_pipeline_manager.pipelines
            pipelines = self.pipeline_manager.pipelines + ev_pipes
            transport = messaging.get_transport()
            partitioned = self.partition_coordinator.extract_my_subset(
                self.group_id,
                range(cfg.CONF.notification.pipeline_processing_queues))

            endpoints = []
            targets = []

            for pipe in pipelines:
                if isinstance(pipe, pipeline.EventPipeline):
                    endpoints.append(pipeline.EventPipelineEndpoint(self.ctxt,
                                                                    pipe))
                else:
                    endpoints.append(pipeline.SamplePipelineEndpoint(self.ctxt,
                                                                     pipe))

            for pipe_set, pipe in itertools.product(partitioned, pipelines):
                LOG.debug('Pipeline endpoint: %s from set: %s',
                          pipe.name, pipe_set)
                topic = '%s-%s-%s' % (self.NOTIFICATION_IPC,
                                      pipe.name, pipe_set)
                targets.append(oslo_messaging.Target(topic=topic))

            if self.pipeline_listener:
                self.pipeline_listener.stop()
                self.pipeline_listener.wait()

            self.pipeline_listener = messaging.get_batch_notification_listener(
                transport,
                targets,
                endpoints,
                batch_size=cfg.CONF.notification.batch_size,
                batch_timeout=cfg.CONF.notification.batch_timeout)
            self.pipeline_listener.start()
Example #28
    def start(self):
        """Bind the UDP socket and handle incoming data."""
        # ensure dispatcher is configured before starting other services
        dispatcher_managers = dispatcher.load_dispatcher_manager()
        (self.meter_manager, self.event_manager) = dispatcher_managers
        self.sample_listener = None
        self.event_listener = None
        self.udp_thread = None
        super(CollectorService, self).start()

        if cfg.CONF.collector.udp_address:
            self.udp_thread = utils.spawn_thread(self.start_udp)

        transport = messaging.get_transport(optional=True)
        if transport:
            if list(self.meter_manager):
                sample_target = oslo_messaging.Target(
                    topic=cfg.CONF.publisher_notifier.metering_topic)
                self.sample_listener = (
                    messaging.get_batch_notification_listener(
                        transport, [sample_target],
                        [SampleEndpoint(cfg.CONF.publisher.telemetry_secret,
                                        self.meter_manager)],
                        allow_requeue=True,
                        batch_size=cfg.CONF.collector.batch_size,
                        batch_timeout=cfg.CONF.collector.batch_timeout))
                self.sample_listener.start()

            if cfg.CONF.notification.store_events and list(self.event_manager):
                event_target = oslo_messaging.Target(
                    topic=cfg.CONF.publisher_notifier.event_topic)
                self.event_listener = (
                    messaging.get_batch_notification_listener(
                        transport, [event_target],
                        [EventEndpoint(cfg.CONF.publisher.telemetry_secret,
                                       self.event_manager)],
                        allow_requeue=True,
                        batch_size=cfg.CONF.collector.batch_size,
                        batch_timeout=cfg.CONF.collector.batch_timeout))
                self.event_listener.start()
Example #29
    def _configure_pipeline_listener(self):
        ev_pipes = self.event_pipeline_manager.pipelines
        pipelines = self.pipeline_manager.pipelines + ev_pipes
        transport = messaging.get_transport(self.conf)
        partitioned = self.partition_coordinator.extract_my_subset(
            self.group_id,
            range(self.conf.notification.pipeline_processing_queues))

        endpoints = []
        targets = []

        for pipe in pipelines:
            if isinstance(pipe, pipeline.EventPipeline):
                endpoints.append(pipeline.EventPipelineEndpoint(pipe))
            else:
                endpoints.append(pipeline.SamplePipelineEndpoint(pipe))

        for pipe_set, pipe in itertools.product(partitioned, pipelines):
            LOG.debug('Pipeline endpoint: %s from set: %s',
                      pipe.name, pipe_set)
            topic = '%s-%s-%s' % (self.NOTIFICATION_IPC,
                                  pipe.name, pipe_set)
            targets.append(oslo_messaging.Target(topic=topic))

        if self.pipeline_listener:
            self.pipeline_listener.stop()
            self.pipeline_listener.wait()

        self.pipeline_listener = messaging.get_batch_notification_listener(
            transport,
            targets,
            endpoints,
            batch_size=self.conf.notification.batch_size,
            batch_timeout=self.conf.notification.batch_timeout)
        # NOTE(gordc): set single thread to process data sequentially
        # if batching enabled.
        batch = (1 if self.conf.notification.batch_size > 1 else None)
        self.pipeline_listener.start(override_pool_size=batch)
Example #30
 def test_get_transport_optional(self):
     self.CONF.set_override('rpc_backend', '')
     self.assertIsNone(messaging.get_transport(optional=True,
                                               cache=False))
Example #31
    def start(self):
        super(NotificationService, self).start()
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): the pipeline queues use a single amqp host,
        # hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)
            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)
            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listener()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

        self.init_pipeline_refresh()
Example #32
 def test_get_transport_url_no_caching(self):
     t1 = messaging.get_transport(self.CONF, 'fake://', cache=False)
     t2 = messaging.get_transport(self.CONF, 'fake://', cache=False)
     self.assertNotEqual(t1, t2)
Example #33
 def test_get_transport_default_url_caching_mix(self):
     t1 = messaging.get_transport()
     t2 = messaging.get_transport(cache=False)
     self.assertNotEqual(t1, t2)
Example #34
 def __init__(self, conf):
     transport = messaging.get_transport(conf)
     self.notifier = oslo_messaging.Notifier(
         transport, driver=conf.publisher_notifier.telemetry_driver,
         publisher_id="ceilometer.api")
Example #35
def get_rpc_client(config_file):
    service.prepare_service(argv=['/', '--config-file', config_file])
    transport = messaging.get_transport()
    rpc_client = messaging.get_rpc_client(transport, version='1.0')
    return rpc_client
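A hedged sketch of driving the returned client; the config path, topic, and method name are illustrative assumptions (oslo_messaging's RPCClient supports prepare(topic=...) and cast(ctxt, method, **kwargs)):

    # Hypothetical cast to a metering topic using the client built above.
    client = get_rpc_client('/etc/ceilometer/ceilometer.conf')
    client.prepare(topic='metering').cast({}, 'record_metering_data', data=[])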
Example #36
    def run(self):
        # Delay startup so workers are jittered
        time.sleep(self.startup_delay)

        super(NotificationService, self).run()
        self.shutdown = False
        self.periodic = None
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): the pipeline queues use a single amqp host,
        # hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline(self.conf)

        self.event_pipeline_manager = pipeline.setup_event_pipeline(self.conf)

        self.transport = messaging.get_transport(self.conf)

        if self.conf.notification.workload_partitioning:
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator(
                self.conf, self.coordination_id)
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')

        pipe_manager = self._get_pipe_manager(self.transport,
                                              self.pipeline_manager)
        event_pipe_manager = self._get_event_pipeline_manager(self.transport)

        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if self.conf.notification.workload_partitioning:
            # join the group after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            @periodics.periodic(spacing=self.conf.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()

            self.periodic = periodics.PeriodicWorker.create(
                [],
                executor_factory=lambda: futures.ThreadPoolExecutor(
                    max_workers=10))
            self.periodic.add(run_watchers)

            utils.spawn_thread(self.periodic.start)

            # configure pipelines after all coordination is configured.
            with self.coord_lock:
                self._configure_pipeline_listener()
Example #37
 def test_get_transport_optional(self):
     self.CONF.set_override('transport_url', 'non-url')
     self.assertIsNone(messaging.get_transport(optional=True, cache=False))
Example #38
 def test_get_transport_url_caching_mix(self):
     t1 = messaging.get_transport('fake://')
     t2 = messaging.get_transport('fake://', cache=False)
     self.assertNotEqual(t1, t2)
Example #39
 def __init__(self, conf):
     transport = messaging.get_transport(conf)
     self.notifier = oslo_messaging.Notifier(
         transport,
         driver=conf.publisher_notifier.telemetry_driver,
         publisher_id="ceilometer.api")
Example #40
 def __init__(self):
     transport = messaging.get_transport()
     self.client = messaging.get_rpc_client(
         transport, topic=cfg.CONF.alarm.partition_rpc_topic, version="1.0")
Example #41
    def run(self):
        super(NotificationService, self).run()
        self.shutdown = False
        self.periodic = None
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): the pipeline queues use a single amqp host,
        # hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            @periodics.periodic(spacing=cfg.CONF.coordination.heartbeat,
                                run_immediately=True)
            def heartbeat():
                self.partition_coordinator.heartbeat()

            @periodics.periodic(spacing=cfg.CONF.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()

            self.periodic = periodics.PeriodicWorker.create(
                [],
                executor_factory=lambda: futures.ThreadPoolExecutor(
                    max_workers=10))
            self.periodic.add(heartbeat)
            self.periodic.add(run_watchers)

            utils.spawn_thread(self.periodic.start)

            # configure pipelines after all coordination is configured.
            with self.coord_lock:
                self._configure_pipeline_listener()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))

        self.init_pipeline_refresh()
Example #42
    def __init__(self, worker_id, conf, namespaces=None, pollster_list=None):
        namespaces = namespaces or ['compute', 'central']
        pollster_list = pollster_list or []
        group_prefix = conf.polling.partitioning_group_prefix

        # the coordination and pollster-list features are mutually exclusive
        # and cannot be used at the same time, in order to avoid both sample
        # duplication and sample loss
        if pollster_list and conf.coordination.backend_url:
            raise PollsterListForbidden()

        super(AgentManager, self).__init__(worker_id)

        self.conf = conf

        def _match(pollster):
            """Find out if pollster name matches to one of the list."""
            return any(
                fnmatch.fnmatch(pollster.name, pattern)
                for pattern in pollster_list)

        if type(namespaces) is not list:
            namespaces = [namespaces]

        # we'll have the default ['compute', 'central'] here if no namespaces
        # are passed
        extensions = (self._extensions('poll', namespace, self.conf).extensions
                      for namespace in namespaces)
        # get the extensions from pollster builder
        extensions_fb = (self._extensions_from_builder('poll', namespace)
                         for namespace in namespaces)
        if pollster_list:
            extensions = (moves.filter(_match, exts) for exts in extensions)
            extensions_fb = (moves.filter(_match, exts)
                             for exts in extensions_fb)

        self.extensions = list(itertools.chain(*list(extensions))) + list(
            itertools.chain(*list(extensions_fb)))

        if self.extensions == []:
            raise EmptyPollstersList()

        discoveries = (self._extensions('discover', namespace,
                                        self.conf).extensions
                       for namespace in namespaces)
        self.discoveries = list(itertools.chain(*list(discoveries)))
        self.polling_periodics = None

        if self.conf.coordination.backend_url:
            # XXX uuid4().bytes ought to work, but it requires ascii for now
            coordination_id = str(uuid.uuid4()).encode('ascii')
            self.partition_coordinator = coordination.get_coordinator(
                self.conf.coordination.backend_url, coordination_id)
        else:
            self.partition_coordinator = None

        # Compose coordination group prefix.
        # We'll use namespaces as the basis for this partitioning.
        namespace_prefix = '-'.join(sorted(namespaces))
        self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
                             if group_prefix else namespace_prefix)

        self.notifier = oslo_messaging.Notifier(
            messaging.get_transport(self.conf),
            driver=self.conf.publisher_notifier.telemetry_driver,
            publisher_id="ceilometer.polling")

        self._keystone = None
        self._keystone_last_exception = None
Example #43
 def __init__(self):
     super(AlarmNotifierService, self).__init__()
     transport = messaging.get_transport()
     self.rpc_server = messaging.get_rpc_server(
         transport, cfg.CONF.alarm.notifier_rpc_topic, self)
Example #44
 def test_get_transport_url_caching(self):
     t1 = messaging.get_transport('fake://')
     t2 = messaging.get_transport('fake://')
     self.assertEqual(t1, t2)
Example #45
 def test_get_transport_default_url_caching(self):
     t1 = messaging.get_transport()
     t2 = messaging.get_transport()
     self.assertEqual(t1, t2)
Example #46
 def __init__(self, conf, mgr):
     self.conf = conf
     self.mgr = mgr
     self.notifiers = self._get_notifiers(messaging.get_transport(conf))