Example #1
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        transport = messaging.get_transport()
        self.partition_coordinator = coordination.PartitionCoordinator()
        self.partition_coordinator.start()

        event_pipe_manager = None
        if cfg.CONF.notification.workload_partitioning:
            pipe_manager = pipeline.SamplePipelineTransportManager()
            for pipe in self.pipeline_manager.pipelines:
                pipe_manager.add_transporter(
                    (pipe.source.support_meter,
                     self._get_notifier(transport, pipe)))
            if cfg.CONF.notification.store_events:
                event_pipe_manager = pipeline.EventPipelineTransportManager()
                for pipe in self.event_pipeline_manager.pipelines:
                    event_pipe_manager.add_transporter(
                        (pipe.source.support_event,
                         self._get_notifier(transport, pipe)))

            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
        else:
        # FIXME(sileht): endpoint uses the notification_topics option
        # and it should not because this is an oslo_messaging option
        # not a ceilometer. Until we have something to get the
        # notification_topics in another way, we must create a transport
        # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(transport, '')
            pipe_manager = self.pipeline_manager
            if cfg.CONF.notification.store_events:
                event_pipe_manager = self.event_pipeline_manager
            self.group_id = None

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            self.partition_coordinator.join_group(self.group_id)
            self._configure_pipeline_listeners()
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
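        # (604800 seconds = one week between no-op runs)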
        self.tg.add_timer(604800, lambda: None)
Example #2
    def start(self):
        super(NotificationService, self).start()
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): a single amqp host is used for the pipeline
        # queues, hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group only after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)
            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)
            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listener()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(_LW('Non-metric meters may be collected. It is highly '
                            'advisable to disable these meters using '
                            'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

        self.init_pipeline_refresh()
Example #3
    def run(self):
        # Delay startup so workers are jittered
        time.sleep(self.startup_delay)

        super(NotificationService, self).run()

        self.managers = [ext.obj for ext in named.NamedExtensionManager(
            namespace='ceilometer.notification.pipeline',
            names=self.conf.notification.pipelines, invoke_on_load=True,
            on_missing_entrypoints_callback=self._log_missing_pipeline,
            invoke_args=(self.conf,))]

        # FIXME(sileht): endpoint uses the notification_topics option
        # and it should not because this is an oslo_messaging option
        # not a ceilometer. Until we have something to get the
        # notification_topics in another way, we must create a transport
        # to ensure the option has been registered by oslo_messaging.
        messaging.get_notifier(messaging.get_transport(self.conf), '')

        endpoints = []
        for pipe_mgr in self.managers:
            endpoints.extend(pipe_mgr.get_main_endpoints())
        targets = self.get_targets()

        urls = self.conf.notification.messaging_urls or [None]
        for url in urls:
            transport = messaging.get_transport(self.conf, url)
            # NOTE(gordc): ignore batching, as we want the pull to maintain
            # sequencing as much as possible.
            listener = messaging.get_batch_notification_listener(
                transport, targets, endpoints, allow_requeue=True)
            listener.start(
                override_pool_size=self.conf.max_parallel_requests
            )
            self.listeners.append(listener)
Example #4
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        transport = messaging.get_transport()
        self.partition_coordinator = coordination.PartitionCoordinator()
        self.partition_coordinator.start()

        event_pipe_manager = None
        if cfg.CONF.notification.workload_partitioning:
            pipe_manager = pipeline.SamplePipelineTransportManager()
            for pipe in self.pipeline_manager.pipelines:
                pipe_manager.add_transporter(
                    (pipe.source.support_meter,
                     self._get_notifier(transport, pipe)))
            if cfg.CONF.notification.store_events:
                event_pipe_manager = pipeline.EventPipelineTransportManager()
                for pipe in self.event_pipeline_manager.pipelines:
                    event_pipe_manager.add_transporter(
                        (pipe.source.support_event,
                         self._get_notifier(transport, pipe)))

            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(transport, '')
            pipe_manager = self.pipeline_manager
            if cfg.CONF.notification.store_events:
                event_pipe_manager = self.event_pipeline_manager
            self.group_id = None

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            self.partition_coordinator.join_group(self.group_id)
            self._configure_pipeline_listeners()
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(_LW('Non-metric meters may be collected. It is highly '
                            'advisable to disable these meters using '
                            'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #5
    def start(self):
        super(NotificationService, self).start()
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()
        self.listeners, self.pipeline_listeners = [], []

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group only after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)
            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)
            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listeners()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

        self.init_pipeline_refresh()
Example #6
    def run(self):
        # Delay startup so workers are jittered
        time.sleep(self.startup_delay)

        super(NotificationService, self).run()
        self.shutdown = False
        self.periodic = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): a single amqp host is used for the pipeline
        # queues, hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline(self.conf)

        self.event_pipeline_manager = pipeline.setup_event_pipeline(self.conf)

        self.transport = messaging.get_transport(self.conf)

        if self.conf.notification.workload_partitioning:
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')

        pipe_manager = self._get_pipe_manager(self.transport,
                                              self.pipeline_manager)
        event_pipe_manager = self._get_event_pipeline_manager(self.transport)

        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if self.conf.notification.workload_partitioning:
            # join the group only after all manager setup is complete
            self.hashring = self.partition_coordinator.join_partitioned_group(
                self.NOTIFICATION_NAMESPACE)

            @periodics.periodic(spacing=self.conf.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()

            self.periodic = periodics.PeriodicWorker.create(
                [],
                executor_factory=lambda: futures.ThreadPoolExecutor(
                    max_workers=10))
            self.periodic.add(run_watchers)

            utils.spawn_thread(self.periodic.start)
            # configure pipelines after all coordination is configured.
            with self.coord_lock:
                self._configure_pipeline_listener()
Example #7
    def test_notification_reloaded_event_pipeline(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher

        ev_pipeline_cfg_file = self.setup_event_pipeline(
            ['compute.instance.create.start'])
        self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file)

        self.CONF.set_override("store_events", True, group="notification")
        self.expected_events = 1
        self.srv.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(),
                      'compute.instance.create.start',
                      TEST_NOTICE_PAYLOAD)

        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.events) >= self.expected_events:
                break
            eventlet.sleep(0)

        self.assertEqual(self.expected_events, len(self.publisher.events))

        # Flush publisher events to test reloading
        self.publisher.events = []
        # Modify the collection targets
        updated_ev_pipeline_cfg_file = self.setup_event_pipeline(
            ['compute.instance.*'])
        # Move/re-name the updated pipeline file to the original pipeline
        # file path as recorded in oslo config
        shutil.move(updated_ev_pipeline_cfg_file, ev_pipeline_cfg_file)

        self.expected_events = 1
        # Brief sleep to let the pipeline poller complete the reloading
        eventlet.sleep(3)
        # Send message again to verify the reload works
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)

        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.events) >= self.expected_events:
                break
            eventlet.sleep(0)

        self.assertEqual(self.expected_events, len(self.publisher.events))

        self.assertEqual(self.publisher.events[0].event_type,
                         'compute.instance.create.end')
        self.srv.stop()
Example #8
    def start(self):
        super(NotificationService, self).start()
        # FIXME(sileht): endpoint uses the notification_topics option
        # and it should not because this is an oslo.messaging option
        # not a ceilometer. Until we have something to get the
        # notification_topics in another way, we must create a transport
        # to ensure the option has been registered by oslo.messaging.
        transport = messaging.get_transport()
        messaging.get_notifier(transport, '')

        self.pipeline_manager = pipeline.setup_pipeline()

        self.notification_manager = self._get_notifications_manager(
            self.pipeline_manager)
        if not list(self.notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints = [event_endpoint.EventsNotificationEndpoint()]

        targets = []
        for ext in self.notification_manager:
            handler = ext.obj
            LOG.debug(
                _('Event types from %(name)s: %(type)s'
                  ' (ack_on_error=%(error)s)') % {
                      'name': ext.name,
                      'type': ', '.join(handler.event_types),
                      'error': ack_on_error
                  })
            # NOTE(gordc): this could be a set check but oslo.messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(cfg.CONF):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        self.listeners = []
        for url in urls:
            transport = messaging.get_transport(url)
            listener = messaging.get_notification_listener(
                transport, targets, endpoints)
            listener.start()
            self.listeners.append(listener)

        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #9
    def test_notification_reloaded_pipeline(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher

        pipeline_cfg_file = self.setup_pipeline(['instance'])
        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)

        self.expected_samples = 1
        self.srv.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)

        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if (len(self.publisher.samples) >= self.expected_samples and
                    len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)

        self.assertEqual(self.expected_samples, len(self.publisher.samples))

        # Flush publisher samples to test reloading
        self.publisher.samples = []
        # Modify the collection targets
        updated_pipeline_cfg_file = self.setup_pipeline(['vcpus',
                                                         'disk.root.size'])
        # Move/re-name the updated pipeline file to the original pipeline
        # file path as recorded in oslo config
        shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file)

        self.expected_samples = 2
        # Brief sleep to let the pipeline poller complete the reloading
        eventlet.sleep(3)
        # Send message again to verify the reload works
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)

        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if (len(self.publisher.samples) >= self.expected_samples and
                    len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)

        self.assertEqual(self.expected_samples, len(self.publisher.samples))

        for sample in self.publisher.samples:
            self.assertIn(sample.name, ['disk.root.size', 'vcpus'])
        self.srv.stop()
Example #10
    def start(self):
        super(NotificationService, self).start()
        # FIXME(sileht): endpoint uses the notification_topics option
        # and it should not because this is an oslo.messaging option
        # not a ceilometer. Until we have something to get the
        # notification_topics in another way, we must create a transport
        # to ensure the option has been registered by oslo.messaging.
        transport = messaging.get_transport()
        messaging.get_notifier(transport, '')

        self.pipeline_manager = pipeline.setup_pipeline()

        self.notification_manager = self._get_notifications_manager(
            self.pipeline_manager)
        if not list(self.notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints = [event_endpoint.EventsNotificationEndpoint()]

        targets = []
        for ext in self.notification_manager:
            handler = ext.obj
            LOG.debug(_('Event types from %(name)s: %(type)s'
                        ' (ack_on_error=%(error)s)') %
                      {'name': ext.name,
                       'type': ', '.join(handler.event_types),
                       'error': ack_on_error})
            # NOTE(gordc): this could be a set check but oslo.messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(cfg.CONF):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        self.listeners = []
        for url in urls:
            transport = messaging.get_transport(url)
            listener = messaging.get_notification_listener(
                transport, targets, endpoints)
            listener.start()
            self.listeners.append(listener)

        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #11
    def run(self):
        # Delay startup so workers are jittered
        time.sleep(self.startup_delay)

        super(NotificationService, self).run()
        self.coord_lock = threading.Lock()

        self.managers = [
            ext.obj for ext in named.NamedExtensionManager(
                namespace='ceilometer.notification.pipeline',
                names=self.conf.notification.pipelines,
                invoke_on_load=True,
                on_missing_entrypoints_callback=self._log_missing_pipeline,
                invoke_args=(self.conf,
                             self.conf.notification.workload_partitioning))
        ]

        self.transport = messaging.get_transport(self.conf)

        if self.conf.notification.workload_partitioning:
            self.partition_coordinator.start(start_heart=True)
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')

        self._configure_main_queue_listeners()

        if self.conf.notification.workload_partitioning:
            # join the group only after all manager setup is complete
            self.hashring = self.partition_coordinator.join_partitioned_group(
                self.NOTIFICATION_NAMESPACE)

            @periodics.periodic(spacing=self.conf.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()
                if self.group_state != self.hashring.ring.nodes:
                    self.group_state = self.hashring.ring.nodes.copy()
                    self._refresh_agent()

            self.periodic = periodics.PeriodicWorker.create(
                [],
                executor_factory=lambda: futures.ThreadPoolExecutor(
                    max_workers=10))
            self.periodic.add(run_watchers)
            utils.spawn_thread(self.periodic.start)
Example #12
    def _check_notifications(self, fake_publisher_cls):
        fake_publisher_cls.side_effect = [self.publisher, self.publisher2]

        self.srv = notification.NotificationService()
        self.srv2 = notification.NotificationService()
        with mock.patch(
            "ceilometer.coordination.PartitionCoordinator" "._get_members", return_value=["harry", "lloyd"]
        ):
            with mock.patch("uuid.uuid4", return_value="harry"):
                self.srv.start()
            with mock.patch("uuid.uuid4", return_value="lloyd"):
                self.srv2.start()

        notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise")
        payload1 = TEST_NOTICE_PAYLOAD.copy()
        payload1["instance_id"] = "0"
        notifier.info(context.RequestContext(), "compute.instance.create.end", payload1)
        payload2 = TEST_NOTICE_PAYLOAD.copy()
        payload2["instance_id"] = "1"
        notifier.info(context.RequestContext(), "compute.instance.create.end", payload2)
        self.expected_samples = 4
        start = timeutils.utcnow()
        with mock.patch("six.moves.builtins.hash", lambda x: int(x)):
            while timeutils.delta_seconds(start, timeutils.utcnow()) < 60:
                if len(self.publisher.samples + self.publisher2.samples) >= self.expected_samples:
                    break
                eventlet.sleep(0)
            self.srv.stop()
            self.srv2.stop()

        self.assertEqual(2, len(self.publisher.samples))
        self.assertEqual(2, len(self.publisher2.samples))
        self.assertEqual(1, len(set(s.resource_id for s in self.publisher.samples)))
        self.assertEqual(1, len(set(s.resource_id for s in self.publisher2.samples)))
Example #13
    def _check_notifications(self, fake_publisher_cls):
        fake_publisher_cls.side_effect = [self.publisher, self.publisher2]

        self.srv = notification.NotificationService(0, self.CONF, 'harry')
        self.srv2 = notification.NotificationService(0, self.CONF, 'lloyd')
        with mock.patch('ceilometer.coordination.PartitionCoordinator'
                        '._get_members', return_value=['harry', 'lloyd']):
            self.srv.run()
            self.addCleanup(self.srv.terminate)
            self.srv2.run()
            self.addCleanup(self.srv2.terminate)

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        payload1 = TEST_NOTICE_PAYLOAD.copy()
        payload1['instance_id'] = '0'
        notifier.info({}, 'compute.instance.create.end', payload1)
        payload2 = TEST_NOTICE_PAYLOAD.copy()
        payload2['instance_id'] = '1'
        notifier.info({}, 'compute.instance.create.end', payload2)
        self.expected_samples = 4
        with mock.patch('six.moves.builtins.hash', lambda x: int(x)):
            start = time.time()
            while time.time() - start < 60:
                if (len(self.publisher.samples + self.publisher2.samples) >=
                        self.expected_samples):
                    break
                time.sleep(0.1)

        self.assertEqual(2, len(self.publisher.samples))
        self.assertEqual(2, len(self.publisher2.samples))
        self.assertEqual(1, len(set(
            s.resource_id for s in self.publisher.samples)))
        self.assertEqual(1, len(set(
            s.resource_id for s in self.publisher2.samples)))
Example #14
def _send_notification(event, payload):
    notification = event.replace(" ", "_")
    notification = "alarm.%s" % notification
    transport = messaging.get_transport()
    notifier = messaging.get_notifier(transport, publisher_id="ceilometer.api")
    # FIXME(sileht): perhaps we need to copy some infos from the
    # pecan request headers like nova does
    notifier.info(context.RequestContext(), notification, payload)
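
A minimal usage sketch for the helper above; the event name and payload keys are illustrative assumptions, not values taken from the Ceilometer API:

# Hypothetical call: emits an "alarm.state_transition" notification
# with publisher_id "ceilometer.api" via the helper defined above.
_send_notification("state transition", {"alarm_id": "example-alarm-id",
                                        "state": "alarm"})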
Example #15
def _send_notification(event, payload):
    notification = event.replace(" ", "_")
    notification = "alarm.%s" % notification
    transport = messaging.get_transport()
    notifier = messaging.get_notifier(transport, publisher_id="ceilometer.api")
    # FIXME(sileht): perhaps we need to copy some infos from the
    # pecan request headers like nova does
    notifier.info(context.RequestContext(), notification, payload)
Example #16
    def _check_notifications(self, fake_publisher_cls):
        fake_publisher_cls.side_effect = [self.publisher, self.publisher2]

        maybe = {"srv": 0, "srv2": -1}

        def _sometimes_srv(item):
            maybe["srv"] += 1
            return (maybe["srv"] % 2) == 0

        self.srv = notification.NotificationService(0, self.CONF)
        self.srv.partition_coordinator = pc = mock.MagicMock()
        hashring_srv1 = mock.MagicMock()
        hashring_srv1.belongs_to_self = _sometimes_srv
        hashring_srv1.ring.nodes = {'id1': mock.Mock()}
        pc.join_partitioned_group.return_value = hashring_srv1
        self.srv.run()
        self.addCleanup(self.srv.terminate)

        def _sometimes_srv2(item):
            maybe["srv2"] += 1
            return (maybe["srv2"] % 2) == 0

        self.srv2 = notification.NotificationService(0, self.CONF)
        self.srv2.partition_coordinator = pc = mock.MagicMock()
        hashring = mock.MagicMock()
        hashring.belongs_to_self = _sometimes_srv2
        hashring.ring.nodes = {'id1': mock.Mock(), 'id2': mock.Mock()}
        self.srv.hashring.ring.nodes = hashring.ring.nodes.copy()
        pc.join_partitioned_group.return_value = hashring
        self.srv2.run()
        self.addCleanup(self.srv2.terminate)

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        payload1 = TEST_NOTICE_PAYLOAD.copy()
        payload1['instance_id'] = '0'
        notifier.info({}, 'compute.instance.create.end', payload1)
        payload2 = TEST_NOTICE_PAYLOAD.copy()
        payload2['instance_id'] = '1'
        notifier.info({}, 'compute.instance.create.end', payload2)
        self.expected_samples = 4
        with mock.patch('six.moves.builtins.hash', lambda x: int(x)):
            start = time.time()
            while time.time() - start < 10:
                if (len(self.publisher.samples + self.publisher2.samples) >=
                        self.expected_samples
                        and len(self.srv.group_state) == 2):
                    break
                time.sleep(0.1)

        self.assertEqual(2, len(self.publisher.samples))
        self.assertEqual(2, len(self.publisher2.samples))
        self.assertEqual(
            1, len(set(s.resource_id for s in self.publisher.samples)))
        self.assertEqual(
            1, len(set(s.resource_id for s in self.publisher2.samples)))
        self.assertEqual(2, len(self.srv.group_state))
Example #17
    def test_notification_service_error_topic(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher
        self.run_service(self.srv)
        notifier = messaging.get_notifier(self.transport,
                                          'compute.vagrant-precise')
        notifier.error({}, 'compute.instance.error', TEST_NOTICE_PAYLOAD)
        start = time.time()
        while time.time() - start < 60:
            if len(self.publisher.events) >= self.expected_events:
                break
            time.sleep(0.1)  # yield so the listener threads can make progress
        self.assertEqual(self.expected_events, len(self.publisher.events))
Example #18
    def test_notification_service_error_topic(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher
        self.srv.start()
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.error(context.RequestContext(), "compute.instance.error",
                       TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.events) >= self.expected_events:
                break
            eventlet.sleep(0)
        self.srv.stop()
        self.assertEqual(self.expected_events, len(self.publisher.events))
Example #19
    def test_notification_service_error_topic(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher
        self.srv.start()
        self.addCleanup(self.srv.stop)
        notifier = messaging.get_notifier(self.transport,
                                          'compute.vagrant-precise')
        notifier.error({}, 'compute.instance.error', TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.events) >= self.expected_events:
                break
            eventlet.sleep(0)  # cooperative yield so the listeners can run
        self.assertEqual(self.expected_events, len(self.publisher.events))
Example #20
    def test_notification_service_error_topic(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher
        self.srv.start()
        self.addCleanup(self.srv.stop)
        notifier = messaging.get_notifier(self.transport,
                                          'compute.vagrant-precise')
        notifier.error({}, 'compute.instance.error',
                       TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if len(self.publisher.events) >= self.expected_events:
                break
            eventlet.sleep(0)  # cooperative yield so the listeners can run
        self.assertEqual(self.expected_events, len(self.publisher.events))
Example #21
    def test_notification_service_error_topic(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher
        self.srv.run()
        self.addCleanup(self.srv.terminate)
        notifier = messaging.get_notifier(self.transport,
                                          'compute.vagrant-precise')
        notifier.error({}, 'compute.instance.error',
                       TEST_NOTICE_PAYLOAD)
        start = time.time()
        while time.time() - start < 60:
            if len(self.publisher.events) >= self.expected_events:
                break
            time.sleep(0.1)  # yield so the listener threads can make progress
        self.assertEqual(self.expected_events, len(self.publisher.events))
Example #22
    def _check_notification_service(self):
        self.run_service(self.srv)
        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info({}, 'compute.instance.create.end', TEST_NOTICE_PAYLOAD)
        start = time.time()
        while time.time() - start < 60:
            if (len(self.publisher.samples) >= self.expected_samples
                    and len(self.publisher.events) >= self.expected_events):
                break
            time.sleep(0.1)  # yield so the listener threads can make progress

        resources = list(set(s.resource_id for s in self.publisher.samples))
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(self.expected_events, len(self.publisher.events))
        self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources)
Example #23
    def _check_notification_service(self):
        self.srv.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info({}, 'compute.instance.create.end', TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if (len(self.publisher.samples) >= self.expected_samples
                    and len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)  # cooperative yield so the listeners can run
        self.assertNotEqual(self.srv.listeners, self.srv.pipeline_listeners)
        self.srv.stop()

        resources = list(set(s.resource_id for s in self.publisher.samples))
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(self.expected_events, len(self.publisher.events))
        self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources)
Example #24
    def _check_notification_service(self):
        self.srv.start()
        self.addCleanup(self.srv.stop)

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        notifier.info({}, 'compute.instance.create.end',
                      TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if (len(self.publisher.samples) >= self.expected_samples and
                    len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)  # cooperative yield so the listeners can run

        resources = list(set(s.resource_id for s in self.publisher.samples))
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(self.expected_events, len(self.publisher.events))
        self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources)
Example #25
    def _check_notification_service(self):
        self.srv.start()

        notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise")
        notifier.info(context.RequestContext(), "compute.instance.create.end", TEST_NOTICE_PAYLOAD)
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
            if (
                len(self.publisher.samples) >= self.expected_samples
                and len(self.publisher.events) >= self.expected_events
            ):
                break
            eventlet.sleep(0)
        self.assertNotEqual(self.srv.listeners, self.srv.pipeline_listeners)
        self.srv.stop()

        resources = list(set(s.resource_id for s in self.publisher.samples))
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(self.expected_events, len(self.publisher.events))
        self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources)
Example #26
    def _check_notifications(self, fake_publisher_cls):
        fake_publisher_cls.side_effect = [self.publisher, self.publisher2]

        self.srv = notification.NotificationService()
        self.srv2 = notification.NotificationService()
        with mock.patch(
                'ceilometer.coordination.PartitionCoordinator'
                '._get_members',
                return_value=['harry', 'lloyd']):
            with mock.patch('uuid.uuid4', return_value='harry'):
                self.srv.start()
            with mock.patch('uuid.uuid4', return_value='lloyd'):
                self.srv2.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        payload1 = TEST_NOTICE_PAYLOAD.copy()
        payload1['instance_id'] = '0'
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      payload1)
        payload2 = TEST_NOTICE_PAYLOAD.copy()
        payload2['instance_id'] = '1'
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      payload2)
        self.expected_samples = 4
        start = timeutils.utcnow()
        with mock.patch('six.moves.builtins.hash', lambda x: int(x)):
            while timeutils.delta_seconds(start, timeutils.utcnow()) < 60:
                if (len(self.publisher.samples + self.publisher2.samples) >=
                        self.expected_samples):
                    break
                eventlet.sleep(0)
            self.srv.stop()
            self.srv2.stop()

        self.assertEqual(2, len(self.publisher.samples))
        self.assertEqual(2, len(self.publisher2.samples))
        self.assertEqual(
            1, len(set(s.resource_id for s in self.publisher.samples)))
        self.assertEqual(
            1, len(set(s.resource_id for s in self.publisher2.samples)))
Example #27
    def test_multiple_agents(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher

        self.srv2 = notification.NotificationService()
        with mock.patch(
                'ceilometer.coordination.PartitionCoordinator'
                '._get_members',
                return_value=['harry', 'lloyd']):
            with mock.patch('uuid.uuid4', return_value='harry'):
                self.srv.start()
            with mock.patch('uuid.uuid4', return_value='lloyd'):
                self.srv2.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        payload1 = TEST_NOTICE_PAYLOAD.copy()
        payload1['instance_id'] = '0'
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      payload1)
        payload2 = TEST_NOTICE_PAYLOAD.copy()
        payload2['instance_id'] = '1'
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      payload2)
        self.expected_samples = 4
        self.expected_events = 2
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 60:
            if (len(self.publisher.samples) >= self.expected_samples
                    and len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)
        self.srv.stop()
        self.srv2.stop()

        resources = set(s.resource_id for s in self.publisher.samples)
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(self.expected_events, len(self.publisher.events))
        self.assertEqual(set(['1', '0']), resources)
Example #28
    def _check_notifications(self, fake_publisher_cls):
        fake_publisher_cls.side_effect = [self.publisher, self.publisher2]

        self.srv = notification.NotificationService()
        self.srv2 = notification.NotificationService()
        with mock.patch('ceilometer.coordination.PartitionCoordinator'
                        '._get_members', return_value=['harry', 'lloyd']):
            with mock.patch('uuid.uuid4', return_value='harry'):
                self.srv.start()
            with mock.patch('uuid.uuid4', return_value='lloyd'):
                self.srv2.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        payload1 = TEST_NOTICE_PAYLOAD.copy()
        payload1['instance_id'] = '0'
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      payload1)
        payload2 = TEST_NOTICE_PAYLOAD.copy()
        payload2['instance_id'] = '1'
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      payload2)
        self.expected_samples = 4
        start = timeutils.utcnow()
        with mock.patch('six.moves.builtins.hash', lambda x: int(x)):
            while timeutils.delta_seconds(start, timeutils.utcnow()) < 60:
                if (len(self.publisher.samples + self.publisher2.samples) >=
                        self.expected_samples):
                    break
                eventlet.sleep(0)  # cooperative yield so the listeners can run
            self.srv.stop()
            self.srv2.stop()

        self.assertEqual(2, len(self.publisher.samples))
        self.assertEqual(2, len(self.publisher2.samples))
        self.assertEqual(1, len(set(
            s.resource_id for s in self.publisher.samples)))
        self.assertEqual(1, len(set(
            s.resource_id for s in self.publisher2.samples)))
Example #29
    def test_multiple_agents(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher

        self.srv2 = notification.NotificationService()
        with mock.patch('ceilometer.coordination.PartitionCoordinator'
                        '._get_members', return_value=['harry', 'lloyd']):
            with mock.patch('uuid.uuid4', return_value='harry'):
                self.srv.start()
            with mock.patch('uuid.uuid4', return_value='lloyd'):
                self.srv2.start()

        notifier = messaging.get_notifier(self.transport,
                                          "compute.vagrant-precise")
        payload1 = TEST_NOTICE_PAYLOAD.copy()
        payload1['instance_id'] = '0'
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      payload1)
        payload2 = TEST_NOTICE_PAYLOAD.copy()
        payload2['instance_id'] = '1'
        notifier.info(context.RequestContext(), 'compute.instance.create.end',
                      payload2)
        self.expected_samples = 4
        self.expected_events = 2
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 60:
            if (len(self.publisher.samples) >= self.expected_samples and
                    len(self.publisher.events) >= self.expected_events):
                break
            eventlet.sleep(0)
        self.srv.stop()
        self.srv2.stop()

        resources = set(s.resource_id for s in self.publisher.samples)
        self.assertEqual(self.expected_samples, len(self.publisher.samples))
        self.assertEqual(self.expected_events, len(self.publisher.events))
        self.assertEqual(set(['1', '0']), resources)
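
The tests above all poll a fake publisher in ad-hoc wait loops; a small helper like the following (an illustrative sketch, not part of the Ceilometer code base) captures that pattern in one place:

import time

def wait_for(condition, timeout=60, interval=0.1):
    # Poll `condition` until it returns True or `timeout` seconds elapse;
    # sleeping between checks yields the CPU to the service's worker threads.
    start = time.time()
    while time.time() - start < timeout:
        if condition():
            return True
        time.sleep(interval)
    return False

# Example: wait_for(lambda: len(publisher.samples) >= expected, timeout=60)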
Example #30
    def start(self):
        super(NotificationService, self).start()
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()
        self.listeners, self.pipeline_listeners = [], []

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group only after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            @periodics.periodic(spacing=cfg.CONF.coordination.heartbeat,
                                run_immediately=True)
            def heartbeat():
                self.partition_coordinator.heartbeat()

            @periodics.periodic(spacing=cfg.CONF.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()

            self.periodic = periodics.PeriodicWorker.create(
                [],
                executor_factory=lambda: futures.ThreadPoolExecutor(
                    max_workers=10))
            self.periodic.add(heartbeat)
            self.periodic.add(run_watchers)

            t = threading.Thread(target=self.periodic.start)
            t.daemon = True
            t.start()

            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listeners()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))

        # NOTE(sileht): We have to drop eventlet to drop this last eventlet
        # thread.
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

        self.init_pipeline_refresh()
Example #31
    def run(self):
        super(NotificationService, self).run()
        self.shutdown = False
        self.periodic = None
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): a single amqp host is used for the pipeline
        # queues, hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline(self.conf)

        self.event_pipeline_manager = pipeline.setup_event_pipeline(self.conf)

        self.transport = messaging.get_transport(self.conf)

        if self.conf.notification.workload_partitioning:
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator(
                self.conf)
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if self.conf.notification.workload_partitioning:
            # join the group only after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            @periodics.periodic(spacing=self.conf.coordination.heartbeat,
                                run_immediately=True)
            def heartbeat():
                self.partition_coordinator.heartbeat()

            @periodics.periodic(spacing=self.conf.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()

            self.periodic = periodics.PeriodicWorker.create(
                [],
                executor_factory=lambda: futures.ThreadPoolExecutor(
                    max_workers=10))
            self.periodic.add(heartbeat)
            self.periodic.add(run_watchers)

            utils.spawn_thread(self.periodic.start)

            # configure pipelines after all coordination is configured.
            with self.coord_lock:
                self._configure_pipeline_listener()

        if not self.conf.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))

        self.init_pipeline_refresh()
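
The heartbeat/watcher scheduling above relies on futurist's periodics; the following is a self-contained sketch of that pattern, assuming only the futurist library and using a toy callback in place of the coordinator methods:

import threading
import time
from concurrent import futures

from futurist import periodics

@periodics.periodic(spacing=1, run_immediately=True)
def tick():
    # Stand-in for partition_coordinator.heartbeat() / run_watchers().
    print('tick')

worker = periodics.PeriodicWorker.create(
    [], executor_factory=lambda: futures.ThreadPoolExecutor(max_workers=2))
worker.add(tick)
threading.Thread(target=worker.start, daemon=True).start()
time.sleep(3)  # let a few periodic runs happen
worker.stop()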
Example #32
    def start(self):
        super(NotificationService, self).start()
        self.periodic = None
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): a single amqp host is used for the pipeline
        # queues, hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group only after all manager setup is complete
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            @periodics.periodic(spacing=cfg.CONF.coordination.heartbeat,
                                run_immediately=True)
            def heartbeat():
                self.partition_coordinator.heartbeat()

            @periodics.periodic(spacing=cfg.CONF.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()

            self.periodic = periodics.PeriodicWorker.create(
                [], executor_factory=lambda:
                futures.ThreadPoolExecutor(max_workers=10))
            self.periodic.add(heartbeat)
            self.periodic.add(run_watchers)

            utils.spawn_thread(self.periodic.start)

            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listener()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(_LW('Non-metric meters may be collected. It is highly '
                            'advisable to disable these meters using '
                            'ceilometer.conf or the pipeline.yaml'))

        self.init_pipeline_refresh()