def test_event_pipeline_endpoint_requeue_on_failure(self):
        """A failing publisher must make the event endpoint ask for a REQUEUE.

        With ack_on_event_error disabled, an exception raised by the
        publisher should surface as NotificationResult.REQUEUE so the
        message is redelivered instead of being acknowledged.
        """
        self.CONF.set_override("ack_on_event_error",
                               False,
                               group="notification")
        self.CONF.set_override("telemetry_secret",
                               "not-so-secret",
                               group="publisher")

        # Build a minimal signed event payload.
        event_payload = dict(
            message_id=uuid.uuid4(),
            event_type='a',
            generated='2013-08-08 21:06:37.803826',
            traits=[dict(name='t_text', value=1, dtype='text_trait')],
            raw=dict(status='started'),
        )
        event_payload['message_signature'] = utils.compute_signature(
            event_payload, 'not-so-secret')

        # Swap in a mock publisher whose publish_events always blows up.
        publisher_mock = mock.Mock()
        self.useFixture(
            mockpatch.Patch('ceilometer.publisher.test.TestPublisher',
                            return_value=publisher_mock))
        publisher_mock.publish_events.side_effect = Exception

        manager = pipeline.PipelineManager(self.CONF,
                                           self.pipeline_cfg,
                                           self.transformer_manager,
                                           self.p_type)
        endpoint = pipeline.EventPipelineEndpoint(manager.pipelines[0])

        result = endpoint.sample([dict(ctxt={},
                                       publisher_id='compute.vagrant-precise',
                                       event_type='a',
                                       payload=[event_payload],
                                       metadata={})])
        self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, result)
# Example #2 (score: 0)
    def _configure_pipeline_listener(self):
        """(Re)build the internal notification listener for all pipelines.

        Creates one IPC target per (processing queue, pipeline) pair —
        restricted to the queues this agent owns when workload
        partitioning is on — then replaces any running listener with a
        fresh batch listener over those targets.
        """
        all_pipes = (self.pipeline_manager.pipelines +
                     self.event_pipeline_manager.pipelines)
        transport = messaging.get_transport(self.conf)

        queue_ids = six.moves.range(
            self.conf.notification.pipeline_processing_queues)
        if self.partition_coordinator:
            # Keep only the queues the hash ring assigns to this agent.
            queue_ids = [q for q in queue_ids
                         if self.hashring.belongs_to_self(q)]

        endpoints = [
            pipeline.EventPipelineEndpoint(pipe)
            if isinstance(pipe, pipeline.EventPipeline)
            else pipeline.SamplePipelineEndpoint(pipe)
            for pipe in all_pipes]

        targets = []
        for queue, pipe in itertools.product(queue_ids, all_pipes):
            LOG.debug('Pipeline endpoint: %s from set: %s',
                      pipe.name, queue)
            targets.append(oslo_messaging.Target(
                topic='%s-%s-%s' % (self.NOTIFICATION_IPC,
                                    pipe.name, queue)))

        # Tear down the previous listener, if any, before starting anew.
        if self.pipeline_listener:
            self.pipeline_listener.stop()
            self.pipeline_listener.wait()

        self.pipeline_listener = messaging.get_batch_notification_listener(
            transport,
            targets,
            endpoints,
            batch_size=self.conf.notification.batch_size,
            batch_timeout=self.conf.notification.batch_timeout)
        # NOTE(gordc): when batching is enabled, use a single thread so
        # data is processed sequentially.
        pool_size = (1 if self.conf.notification.batch_size > 1
                     else self.conf.max_parallel_requests)
        self.pipeline_listener.start(override_pool_size=pool_size)
# Example #3 (score: 0)
    def _configure_pipeline_listener(self):
        """Recreate the pipeline notification listener for this agent.

        Runs under the coordination lock: computes the subset of
        processing queues owned by this member, builds one IPC target
        per (queue, pipeline) pair, and swaps the running listener for
        a new batch listener over those targets.
        """
        with self.coord_lock:
            event_pipes = (self.event_pipeline_manager.pipelines
                           if cfg.CONF.notification.store_events else [])
            all_pipes = self.pipeline_manager.pipelines + event_pipes
            transport = messaging.get_transport()

            # Only listen on the queues the coordinator assigns to us.
            my_queues = self.partition_coordinator.extract_my_subset(
                self.group_id,
                range(cfg.CONF.notification.pipeline_processing_queues))

            endpoints = []
            for pipe in all_pipes:
                if isinstance(pipe, pipeline.EventPipeline):
                    endpoint_cls = pipeline.EventPipelineEndpoint
                else:
                    endpoint_cls = pipeline.SamplePipelineEndpoint
                endpoints.append(endpoint_cls(self.ctxt, pipe))

            targets = []
            for queue, pipe in itertools.product(my_queues, all_pipes):
                LOG.debug('Pipeline endpoint: %s from set: %s', pipe.name,
                          queue)
                targets.append(oslo_messaging.Target(
                    topic='%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name,
                                        queue)))

            # Stop the old listener (if any) before replacing it.
            if self.pipeline_listener:
                self.pipeline_listener.stop()
                self.pipeline_listener.wait()

            self.pipeline_listener = messaging.get_batch_notification_listener(
                transport,
                targets,
                endpoints,
                batch_size=cfg.CONF.notification.batch_size,
                batch_timeout=cfg.CONF.notification.batch_timeout)
            self.pipeline_listener.start()