Example #1
    def start(self):
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer', ), )

        for interval, task in self.setup_polling_tasks().iteritems():
            self.tg.add_timer(interval, self.interval_task, task=task)
Example #2
def _load_pipeline_manager():
    global _pipeline_manager

    _pipeline_manager = pipeline.setup_pipeline(
        transformer.TransformerExtensionManager('ceilometer.transformer', ),
        publisher.PublisherExtensionManager('ceilometer.publisher', ),
    )
Example #3
    def initialize_service_hook(self, service):
        """Consumers must be declared before consume_thread start."""
        LOG.debug("initialize_service_hooks")
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager("ceilometer.transformer")
        )

        LOG.debug("loading notification handlers from %s", self.COLLECTOR_NAMESPACE)
        self.notification_manager = extension.ExtensionManager(namespace=self.COLLECTOR_NAMESPACE, invoke_on_load=True)

        if not list(self.notification_manager):
            LOG.warning("Failed to load any notification handlers for %s", self.COLLECTOR_NAMESPACE)
        self.notification_manager.map(self._setup_subscription)

        LOG.debug("loading dispatchers from %s", self.DISPATCHER_NAMESPACE)
        self.dispatcher_manager = named.NamedExtensionManager(
            namespace=self.DISPATCHER_NAMESPACE,
            names=cfg.CONF.collector.dispatcher,
            invoke_on_load=True,
            invoke_args=[cfg.CONF],
        )
        if not list(self.dispatcher_manager):
            LOG.warning("Failed to load any dispatchers for %s", self.DISPATCHER_NAMESPACE)

        # Set ourselves up as a separate worker for the metering data,
        # since the default for service is to use create_consumer().
        self.conn.create_worker(
            cfg.CONF.publisher_rpc.metering_topic,
            rpc_dispatcher.RpcDispatcher([self]),
            "ceilometer.collector." + cfg.CONF.publisher_rpc.metering_topic,
        )
Example #4
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        transport = messaging.get_transport()
        self.partition_coordinator = coordination.PartitionCoordinator()
        self.partition_coordinator.start()

        event_pipe_manager = None
        if cfg.CONF.notification.workload_partitioning:
            pipe_manager = pipeline.SamplePipelineTransportManager()
            for pipe in self.pipeline_manager.pipelines:
                pipe_manager.add_transporter(
                    (pipe.source.support_meter,
                     self._get_notifier(transport, pipe)))
            if cfg.CONF.notification.store_events:
                event_pipe_manager = pipeline.EventPipelineTransportManager()
                for pipe in self.event_pipeline_manager.pipelines:
                    event_pipe_manager.add_transporter(
                        (pipe.source.support_event,
                         self._get_notifier(transport, pipe)))

            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(transport, '')
            pipe_manager = self.pipeline_manager
            if cfg.CONF.notification.store_events:
                event_pipe_manager = self.event_pipeline_manager
            self.group_id = None

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            self.partition_coordinator.join_group(self.group_id)
            self._configure_pipeline_listeners()
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #5
    def start(self):
        super(NotificationService, self).start()
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): a single amqp host is used for the pipeline
        # queues, hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group after all managers have been set up
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)
            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)
            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listener()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(_LW('Non-metric meters may be collected. It is highly '
                            'advisable to disable these meters using '
                            'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

        self.init_pipeline_refresh()
Example #6
    def refresh_pipeline(self):
        mtime = pipeline.get_pipeline_mtime()
        if mtime > self.pipeline_mtime:
            LOG.info(_LI('Pipeline configuration file has been updated.'))

            self.pipeline_mtime = mtime
            _hash = pipeline.get_pipeline_hash()

            if _hash != self.pipeline_hash:
                LOG.info(_LI("Detected change in pipeline configuration."))

                try:
                    # Pipeline in the notification agent.
                    if hasattr(self, 'pipeline_manager'):
                        self.pipeline_manager = pipeline.setup_pipeline()
                    # Polling in the polling agent.
                    elif hasattr(self, 'polling_manager'):
                        self.polling_manager = pipeline.setup_polling()
                    LOG.debug("Pipeline has been refreshed. "
                              "old hash: %(old)s, new hash: %(new)s",
                              {'old': self.pipeline_hash,
                               'new': _hash})
                except Exception as err:
                    LOG.debug("Active pipeline config's hash is %s",
                              self.pipeline_hash)
                    LOG.exception(_LE('Unable to load changed pipeline: %s')
                                  % err)
                    return

                self.pipeline_hash = _hash
                self.reload_pipeline()
Example #7
    def initialize_service_hook(self, service):
        '''Consumers must be declared before consume_thread start.'''
        LOG.debug('initialize_service_hooks')
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer',
            ),
        )

        LOG.debug('loading notification handlers from %s',
                  self.COLLECTOR_NAMESPACE)
        self.notification_manager = \
            extension.ExtensionManager(
                namespace=self.COLLECTOR_NAMESPACE,
                invoke_on_load=True,
            )

        if not list(self.notification_manager):
            LOG.warning('Failed to load any notification handlers for %s',
                        self.COLLECTOR_NAMESPACE)
        self.notification_manager.map(self._setup_subscription)

        # Set ourselves up as a separate worker for the metering data,
        # since the default for service is to use create_consumer().
        self.conn.create_worker(
            cfg.CONF.publisher_rpc.metering_topic,
            rpc_dispatcher.RpcDispatcher([self]),
            'ceilometer.collector.' + cfg.CONF.publisher_rpc.metering_topic,
        )
Example #8
 def __init__(self):
     if self.__class__.pipeline_manager is None:
         # this is done here as the cfg options are not available
         # when the file is imported.
         self.__class__.pipeline_manager = pipeline.setup_pipeline(
             transformer.TransformerExtensionManager(
                 'ceilometer.transformer'))
Example #9
 def __init__(self):
     if self.__class__.pipeline_manager is None:
         # this is done here as the cfg options are not available
         # when the file is imported.
         self.__class__.pipeline_manager = pipeline.setup_pipeline(
             transformer.TransformerExtensionManager(
                 'ceilometer.transformer'))
Example #10
    def initialize_service_hook(self, service):
        '''Consumers must be declared before consume_thread start.'''
        LOG.debug('initialize_service_hooks')
        publisher_manager = dispatch.NameDispatchExtensionManager(
            namespace=pipeline.PUBLISHER_NAMESPACE,
            check_func=lambda x: True,
            invoke_on_load=True,
        )
        self.pipeline_manager = pipeline.setup_pipeline(publisher_manager)

        LOG.debug('loading notification handlers from %s',
                  self.COLLECTOR_NAMESPACE)
        self.notification_manager = \
            extension_manager.ActivatedExtensionManager(
                namespace=self.COLLECTOR_NAMESPACE,
                disabled_names=cfg.CONF.disabled_notification_listeners,
            )

        if not list(self.notification_manager):
            LOG.warning('Failed to load any notification handlers for %s',
                        self.COLLECTOR_NAMESPACE)
        self.notification_manager.map(self._setup_subscription)

        # Set ourselves up as a separate worker for the metering data,
        # since the default for service is to use create_consumer().
        self.conn.create_worker(
            cfg.CONF.metering_topic,
            rpc_dispatcher.RpcDispatcher([self]),
            'ceilometer.collector.' + cfg.CONF.metering_topic,
        )
Example #11
    def start(self):
        self.pipeline_manager = pipeline.setup_pipeline()

        for interval, task in six.iteritems(self.setup_polling_tasks()):
            self.tg.add_timer(interval,
                              self.interval_task,
                              task=task)
Example #12
    def start(self):
        self.pipeline_manager = pipeline.setup_pipeline()

        for interval, task in six.iteritems(self.setup_polling_tasks()):
            self.tg.add_timer(interval,
                              self.interval_task,
                              task=task)
Example #13
    def initialize_service_hook(self, service):
        '''Consumers must be declared before consume_thread start.'''
        LOG.debug('initialize_service_hooks')
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer', ), )

        LOG.debug('loading notification handlers from %s',
                  self.COLLECTOR_NAMESPACE)
        self.notification_manager = \
            extension.ExtensionManager(
                namespace=self.COLLECTOR_NAMESPACE,
                invoke_on_load=True,
            )

        if not list(self.notification_manager):
            LOG.warning('Failed to load any notification handlers for %s',
                        self.COLLECTOR_NAMESPACE)
        self.notification_manager.map(self._setup_subscription)

        # Set ourselves up as a separate worker for the metering data,
        # since the default for service is to use create_consumer().
        self.conn.create_worker(
            cfg.CONF.publisher_rpc.metering_topic,
            rpc_dispatcher.RpcDispatcher([self]),
            'ceilometer.collector.' + cfg.CONF.publisher_rpc.metering_topic,
        )
Example #14
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        transport = messaging.get_transport()
        self.partition_coordinator = coordination.PartitionCoordinator()
        self.partition_coordinator.start()

        event_pipe_manager = None
        if cfg.CONF.notification.workload_partitioning:
            pipe_manager = pipeline.SamplePipelineTransportManager()
            for pipe in self.pipeline_manager.pipelines:
                pipe_manager.add_transporter(
                    (pipe.source.support_meter,
                     self._get_notifier(transport, pipe)))
            if cfg.CONF.notification.store_events:
                event_pipe_manager = pipeline.EventPipelineTransportManager()
                for pipe in self.event_pipeline_manager.pipelines:
                    event_pipe_manager.add_transporter(
                        (pipe.source.support_event,
                         self._get_notifier(transport, pipe)))

            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(transport, '')
            pipe_manager = self.pipeline_manager
            if cfg.CONF.notification.store_events:
                event_pipe_manager = self.event_pipeline_manager
            self.group_id = None

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            self.partition_coordinator.join_group(self.group_id)
            self._configure_pipeline_listeners()
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(_LW('Non-metric meters may be collected. It is highly '
                            'advisable to disable these meters using '
                            'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #15
 def reinitialize(self):
     self.__pipeline_manager = pipeline.setup_pipeline()
     self.__meters_from_pipeline = set()
     for pipe in self.__pipeline_manager.pipelines:
         if not isinstance(pipe, pipeline.EventPipeline):
             for meter in pipe.source.meters:
                 if meter not in self.__meters_from_pipeline:
                     self.__meters_from_pipeline.add(meter)
Example #16
def _load_pipeline_manager():
    global _pipeline_manager

    _pipeline_manager = pipeline.setup_pipeline(
        transformer.TransformerExtensionManager(
            'ceilometer.transformer',
        ),
    )
Example #17
    def __init__(self, extension_manager):
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer', ), )

        self.pollster_manager = extension_manager

        self.context = context.RequestContext('admin', 'admin', is_admin=True)
Example #18
    def run(self):
        # Delay startup so workers are jittered
        time.sleep(self.startup_delay)

        super(NotificationService, self).run()
        self.shutdown = False
        self.periodic = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): a single amqp host is used for the pipeline
        # queues, hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline(self.conf)

        self.event_pipeline_manager = pipeline.setup_event_pipeline(self.conf)

        self.transport = messaging.get_transport(self.conf)

        if self.conf.notification.workload_partitioning:
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')

        pipe_manager = self._get_pipe_manager(self.transport,
                                              self.pipeline_manager)
        event_pipe_manager = self._get_event_pipeline_manager(self.transport)

        self._configure_main_queue_listeners(pipe_manager, event_pipe_manager)

        if self.conf.notification.workload_partitioning:
            # join the group after all managers have been set up
            self.hashring = self.partition_coordinator.join_partitioned_group(
                self.NOTIFICATION_NAMESPACE)

            @periodics.periodic(spacing=self.conf.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()

            self.periodic = periodics.PeriodicWorker.create(
                [],
                executor_factory=lambda: futures.ThreadPoolExecutor(
                    max_workers=10))
            self.periodic.add(run_watchers)

            utils.spawn_thread(self.periodic.start)
            # configure pipelines after all coordination is configured.
            with self.coord_lock:
                self._configure_pipeline_listener()
Example #19
    def start(self):
        super(NotificationService, self).start()
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()
        self.listeners, self.pipeline_listeners = [], []

        self.pipeline_manager = pipeline.setup_pipeline()

        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        self.transport = messaging.get_transport()

        if cfg.CONF.notification.workload_partitioning:
            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator()
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo_messaging option
            # not a ceilometer. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if cfg.CONF.notification.workload_partitioning:
            # join the group after all managers have been set up
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)
            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)
            # configure pipelines after all coordination is configured.
            self._configure_pipeline_listeners()

        if not cfg.CONF.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

        self.init_pipeline_refresh()
Example #20
    def __init__(self, app, conf):
        self.app = app
        service.prepare_service()
        publisher_manager = dispatch.NameDispatchExtensionManager(
            namespace=pipeline.PUBLISHER_NAMESPACE,
            check_func=lambda x: True,
            invoke_on_load=True,
        )

        self.pipeline_manager = pipeline.setup_pipeline(publisher_manager)
Example #21
    def __init__(self, extension_manager):
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer',
            ),
        )

        self.pollster_manager = extension_manager

        self.context = context.RequestContext('admin', 'admin', is_admin=True)
Example #22
    def __init__(self, extension_manager):
        publisher_manager = dispatch.NameDispatchExtensionManager(
            namespace=pipeline.PUBLISHER_NAMESPACE,
            check_func=lambda x: True,
            invoke_on_load=True,
        )

        self.pipeline_manager = pipeline.setup_pipeline(publisher_manager)

        self.pollster_manager = extension_manager
Example #23
 def __init__(self):
     if not (self._instance and self._inited):
         self._inited = True
         self.__pipeline_manager = pipeline.setup_pipeline()
         self.__meters_from_pipeline = set()
         for pipe in self.__pipeline_manager.pipelines:
             if not isinstance(pipe, pipeline.EventPipeline):
                 for meter in pipe.source.meters:
                     if meter not in self.__meters_from_pipeline:
                         self.__meters_from_pipeline.add(meter)
Example #24
def send_sample():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.StrOpt('sample-name', short='n', help='Meter name.',
                   required=True),
        cfg.StrOpt('sample-type',
                   short='y',
                   help='Meter type (gauge, delta, cumulative).',
                   default='gauge',
                   required=True),
        cfg.StrOpt('sample-unit', short='U', help='Meter unit.'),
        cfg.IntOpt('sample-volume',
                   short='l',
                   help='Meter volume value.',
                   default=1),
        cfg.StrOpt('sample-resource',
                   short='r',
                   help='Meter resource id.',
                   required=True),
        cfg.StrOpt('sample-user', short='u', help='Meter user id.'),
        cfg.StrOpt('sample-project', short='p', help='Meter project id.'),
        cfg.StrOpt('sample-timestamp',
                   short='i',
                   help='Meter timestamp.',
                   default=timeutils.utcnow().isoformat()),
        cfg.StrOpt('sample-metadata', short='m', help='Meter metadata.'),
    ])

    service.prepare_service(conf=conf)

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    pipeline_manager = pipeline.setup_pipeline(
        conf, extension.ExtensionManager('ceilometer.transformer'))

    with pipeline_manager.publisher() as p:
        p([
            sample.Sample(name=conf.sample_name,
                          type=conf.sample_type,
                          unit=conf.sample_unit,
                          volume=conf.sample_volume,
                          user_id=conf.sample_user,
                          project_id=conf.sample_project,
                          resource_id=conf.sample_resource,
                          timestamp=conf.sample_timestamp,
                          resource_metadata=conf.sample_metadata
                          and eval(conf.sample_metadata))
        ])
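
Both send_sample() variants in this listing (this one and the one in Example #44) feed --sample-metadata to eval(), so the flag is expected to carry a Python dict literal. As a small aside that is not part of the original code, the standard-library ast.literal_eval accepts the same literals while refusing arbitrary expressions, which makes it a safer stand-in for this kind of parsing:

    import ast

    def parse_sample_metadata(raw):
        # Accepts only Python literals such as "{'server': 'web01'}";
        # unlike eval(), it will not execute arbitrary expressions.
        return ast.literal_eval(raw) if raw else None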
Example #25
    def refresh_pipeline(self):
        """Refreshes appropriate pipeline, then delegates to agent."""

        if cfg.CONF.refresh_pipeline_cfg:
            pipeline_hash = self.pipeline_changed()
            if pipeline_hash:
                try:
                    # Pipeline in the notification agent.
                    if hasattr(self, 'pipeline_manager'):
                        self.pipeline_manager = pipeline.setup_pipeline()
                    # Polling in the polling agent.
                    elif hasattr(self, 'polling_manager'):
                        self.polling_manager = pipeline.setup_polling()
                    LOG.debug(
                        "Pipeline has been refreshed. "
                        "old hash: %(old)s, new hash: %(new)s", {
                            'old': self.pipeline_hash,
                            'new': pipeline_hash
                        })
                    self.set_pipeline_hash(pipeline_hash)
                    self.pipeline_validated = True
                except Exception as err:
                    LOG.debug("Active pipeline config's hash is %s",
                              self.pipeline_hash)
                    LOG.exception(
                        _LE('Unable to load changed pipeline: %s') % err)

        if cfg.CONF.refresh_event_pipeline_cfg:
            ev_pipeline_hash = self.pipeline_changed(pipeline.EVENT_TYPE)
            if ev_pipeline_hash:
                try:
                    # Pipeline in the notification agent.
                    if hasattr(self, 'event_pipeline_manager'):
                        self.event_pipeline_manager = (
                            pipeline.setup_event_pipeline())

                    LOG.debug(
                        "Event Pipeline has been refreshed. "
                        "old hash: %(old)s, new hash: %(new)s", {
                            'old': self.event_pipeline_hash,
                            'new': ev_pipeline_hash
                        })
                    self.set_pipeline_hash(ev_pipeline_hash,
                                           pipeline.EVENT_TYPE)
                    self.event_pipeline_validated = True
                except Exception as err:
                    LOG.debug("Active event pipeline config's hash is %s",
                              self.event_pipeline_hash)
                    LOG.exception(
                        _LE('Unable to load changed event pipeline:'
                            ' %s') % err)

        if self.pipeline_validated or self.event_pipeline_validated:
            self.reload_pipeline()
            self.clear_pipeline_validation_status()
Example #26
    def start(self):
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer',
            ),
        )

        for interval, task in self.setup_polling_tasks().iteritems():
            self.tg.add_timer(interval,
                              self.interval_task,
                              task=task)
Example #27
    def start(self):
        super(NotificationService, self).start()
        # FIXME(sileht): endpoint uses the notification_topics option
        # and it should not because this is an oslo.messaging option
        # not a ceilometer one. Until we have something to get the
        # notification_topics in another way, we must create a transport
        # to ensure the option has been registered by oslo.messaging.
        transport = messaging.get_transport()
        messaging.get_notifier(transport, '')

        self.pipeline_manager = pipeline.setup_pipeline()

        self.notification_manager = self._get_notifications_manager(
            self.pipeline_manager)
        if not list(self.notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints = [event_endpoint.EventsNotificationEndpoint()]

        targets = []
        for ext in self.notification_manager:
            handler = ext.obj
            LOG.debug(
                _('Event types from %(name)s: %(type)s'
                  ' (ack_on_error=%(error)s)') % {
                      'name': ext.name,
                      'type': ', '.join(handler.event_types),
                      'error': ack_on_error
                  })
            # NOTE(gordc): this could be a set check but oslo.messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(cfg.CONF):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        self.listeners = []
        for url in urls:
            transport = messaging.get_transport(url)
            listener = messaging.get_notification_listener(
                transport, targets, endpoints)
            listener.start()
            self.listeners.append(listener)

        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #28
    def initialize_service_hook(self, service):
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer',
            ),
        )

        self.service = service
        for interval, task in self.setup_polling_tasks().iteritems():
            self.service.tg.add_timer(interval,
                                      self.interval_task,
                                      task=task)
Example #29
    def refresh_pipeline(self):
        """Refreshes appropriate pipeline, then delegates to agent."""

        if cfg.CONF.refresh_pipeline_cfg:
            manager = None
            if hasattr(self, 'pipeline_manager'):
                manager = self.pipeline_manager
            elif hasattr(self, 'polling_manager'):
                manager = self.polling_manager
            pipeline_hash = manager.cfg_changed() if manager else None
            if pipeline_hash:
                try:
                    LOG.debug(
                        "Pipeline has been refreshed. "
                        "old hash: %(old)s, new hash: %(new)s", {
                            'old': manager.cfg_hash,
                            'new': pipeline_hash
                        })
                    # Pipeline in the notification agent.
                    if hasattr(self, 'pipeline_manager'):
                        self.pipeline_manager = pipeline.setup_pipeline()
                    # Polling in the polling agent.
                    elif hasattr(self, 'polling_manager'):
                        self.polling_manager = pipeline.setup_polling()
                    self.pipeline_validated = True
                except Exception as err:
                    LOG.exception(
                        _LE('Unable to load changed pipeline: %s') % err)

        if cfg.CONF.refresh_event_pipeline_cfg:
            # Pipeline in the notification agent.
            manager = (self.event_pipeline_manager if hasattr(
                self, 'event_pipeline_manager') else None)
            ev_pipeline_hash = manager.cfg_changed()
            if ev_pipeline_hash:
                try:
                    LOG.debug(
                        "Event Pipeline has been refreshed. "
                        "old hash: %(old)s, new hash: %(new)s", {
                            'old': manager.cfg_hash,
                            'new': ev_pipeline_hash
                        })
                    self.event_pipeline_manager = (
                        pipeline.setup_event_pipeline())
                    self.event_pipeline_validated = True
                except Exception as err:
                    LOG.exception(
                        _LE('Unable to load changed event pipeline:'
                            ' %s') % err)

        if self.pipeline_validated or self.event_pipeline_validated:
            self.reload_pipeline()
            self.clear_pipeline_validation_status()
Example #30
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        transport = messaging.get_transport()
        self.partition_coordinator = coordination.PartitionCoordinator()
        self.partition_coordinator.start()

        event_transporter = None
        if cfg.CONF.notification.workload_partitioning:
            transporter = []
            for pipe in self.pipeline_manager.pipelines:
                transporter.append(self._get_notifier(transport, pipe))
            if cfg.CONF.notification.store_events:
                event_transporter = []
                for pipe in self.event_pipeline_manager.pipelines:
                    event_transporter.append(
                        self._get_notifier(transport, pipe))

            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo.messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo.messaging.
            messaging.get_notifier(transport, '')
            transporter = self.pipeline_manager
            if cfg.CONF.notification.store_events:
                event_transporter = self.event_pipeline_manager
            self.group_id = None

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(transporter, event_transporter)

        if cfg.CONF.notification.workload_partitioning:
            self.partition_coordinator.join_group(self.group_id)
            self._configure_pipeline_listeners()
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)

        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #31
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()
        if cfg.CONF.notification.store_events:
            self.event_pipeline_manager = pipeline.setup_event_pipeline()

        transport = messaging.get_transport()
        self.partition_coordinator = coordination.PartitionCoordinator()
        self.partition_coordinator.start()

        event_transporter = None
        if cfg.CONF.notification.workload_partitioning:
            transporter = []
            for pipe in self.pipeline_manager.pipelines:
                transporter.append(self._get_notifier(transport, pipe))
            if cfg.CONF.notification.store_events:
                event_transporter = []
                for pipe in self.event_pipeline_manager.pipelines:
                    event_transporter.append(self._get_notifier(transport,
                                                                pipe))

            self.ctxt = context.get_admin_context()
            self.group_id = self.NOTIFICATION_NAMESPACE
        else:
            # FIXME(sileht): endpoint uses the notification_topics option
            # and it should not because this is an oslo.messaging option
            # not a ceilometer one. Until we have something to get the
            # notification_topics in another way, we must create a transport
            # to ensure the option has been registered by oslo.messaging.
            messaging.get_notifier(transport, '')
            transporter = self.pipeline_manager
            if cfg.CONF.notification.store_events:
                event_transporter = self.event_pipeline_manager
            self.group_id = None

        self.listeners, self.pipeline_listeners = [], []
        self._configure_main_queue_listeners(transporter, event_transporter)

        if cfg.CONF.notification.workload_partitioning:
            self.partition_coordinator.join_group(self.group_id)
            self._configure_pipeline_listeners()
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                              self.partition_coordinator.heartbeat)
            self.tg.add_timer(cfg.CONF.coordination.check_watchers,
                              self.partition_coordinator.run_watchers)

        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #32
    def start(self):
        super(NotificationService, self).start()
        # FIXME(sileht): endpoint uses the notification_topics option
        # and it should not because this is an oslo.messaging option
        # not a ceilometer one. Until we have something to get the
        # notification_topics in another way, we must create a transport
        # to ensure the option has been registered by oslo.messaging.
        transport = messaging.get_transport()
        messaging.get_notifier(transport, '')

        self.pipeline_manager = pipeline.setup_pipeline()

        self.notification_manager = self._get_notifications_manager(
            self.pipeline_manager)
        if not list(self.notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints = [event_endpoint.EventsNotificationEndpoint()]

        targets = []
        for ext in self.notification_manager:
            handler = ext.obj
            LOG.debug(_('Event types from %(name)s: %(type)s'
                        ' (ack_on_error=%(error)s)') %
                      {'name': ext.name,
                       'type': ', '.join(handler.event_types),
                       'error': ack_on_error})
            # NOTE(gordc): this could be a set check but oslo.messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(cfg.CONF):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        self.listeners = []
        for url in urls:
            transport = messaging.get_transport(url)
            listener = messaging.get_notification_listener(
                transport, targets, endpoints)
            listener.start()
            self.listeners.append(listener)

        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
Example #33
    def __init__(self, app, conf):
        self.app = app

        self.metadata_headers = [
            h.strip().replace("-", "_").lower() for h in conf.get("metadata_headers", "").split(",") if h.strip()
        ]

        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline()
        self.reseller_prefix = conf.get("reseller_prefix", "AUTH_")
        if self.reseller_prefix and self.reseller_prefix[-1] != "_":
            self.reseller_prefix += "_"
Example #34
    def __init__(self, app, conf):
        self.app = app

        self.metadata_headers = [
            h.strip().replace('-', '_').lower()
            for h in conf.get("metadata_headers", "").split(",") if h.strip()
        ]

        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer', ), )
Example #35
    def __init__(self, app, conf):
        self.app = app

        self.metadata_headers = [
            h.strip().replace('-', '_').lower()
            for h in conf.get("metadata_headers", "").split(",") if h.strip()
        ]

        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline()
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
Example #36
    def refresh_pipeline(self):
        """Refreshes appropriate pipeline, then delegates to agent."""

        if cfg.CONF.refresh_pipeline_cfg:
            pipeline_hash = self.pipeline_changed()
            if pipeline_hash:
                try:
                    # Pipeline in the notification agent.
                    if hasattr(self, 'pipeline_manager'):
                        self.pipeline_manager = pipeline.setup_pipeline()
                    # Polling in the polling agent.
                    elif hasattr(self, 'polling_manager'):
                        self.polling_manager = pipeline.setup_polling()
                    LOG.debug("Pipeline has been refreshed. "
                              "old hash: %(old)s, new hash: %(new)s",
                              {'old': self.pipeline_hash,
                               'new': pipeline_hash})
                    self.set_pipeline_hash(pipeline_hash)
                    self.pipeline_validated = True
                except Exception as err:
                    LOG.debug("Active pipeline config's hash is %s",
                              self.pipeline_hash)
                    LOG.exception(_LE('Unable to load changed pipeline: %s')
                                  % err)

        if cfg.CONF.refresh_event_pipeline_cfg:
            ev_pipeline_hash = self.pipeline_changed(pipeline.EVENT_TYPE)
            if ev_pipeline_hash:
                try:
                    # Pipeline in the notification agent.
                    if hasattr(self, 'event_pipeline_manager'):
                        self.event_pipeline_manager = (
                            pipeline.setup_event_pipeline())

                    LOG.debug("Event Pipeline has been refreshed. "
                              "old hash: %(old)s, new hash: %(new)s",
                              {'old': self.event_pipeline_hash,
                               'new': ev_pipeline_hash})
                    self.set_pipeline_hash(ev_pipeline_hash,
                                           pipeline.EVENT_TYPE)
                    self.event_pipeline_validated = True
                except Exception as err:
                    LOG.debug("Active event pipeline config's hash is %s",
                              self.event_pipeline_hash)
                    LOG.exception(_LE('Unable to load changed event pipeline:'
                                      ' %s') % err)

        if self.pipeline_validated or self.event_pipeline_validated:
            self.reload_pipeline()
            self.clear_pipeline_validation_status()
Example #37
    def refresh_pipeline(self):
        """Refreshes appropriate pipeline, then delegates to agent."""

        if self.conf.refresh_pipeline_cfg:
            manager = None
            if hasattr(self, 'pipeline_manager'):
                manager = self.pipeline_manager
            elif hasattr(self, 'polling_manager'):
                manager = self.polling_manager
            pipeline_hash = manager.cfg_changed() if manager else None
            if pipeline_hash:
                try:
                    LOG.debug("Pipeline has been refreshed. "
                              "old hash: %(old)s, new hash: %(new)s",
                              {'old': manager.cfg_hash,
                               'new': pipeline_hash})
                    # Pipeline in the notification agent.
                    if hasattr(self, 'pipeline_manager'):
                        self.pipeline_manager = pipeline.setup_pipeline(
                            self.conf)
                    # Polling in the polling agent.
                    elif hasattr(self, 'polling_manager'):
                        self.polling_manager = pipeline.setup_polling(
                            self.conf)
                    self.pipeline_validated = True
                except Exception as err:
                    LOG.exception(_LE('Unable to load changed pipeline: %s')
                                  % err)

        if self.conf.refresh_event_pipeline_cfg:
            # Pipeline in the notification agent.
            manager = (self.event_pipeline_manager
                       if hasattr(self, 'event_pipeline_manager') else None)
            ev_pipeline_hash = manager.cfg_changed()
            if ev_pipeline_hash:
                try:
                    LOG.debug("Event Pipeline has been refreshed. "
                              "old hash: %(old)s, new hash: %(new)s",
                              {'old': manager.cfg_hash,
                               'new': ev_pipeline_hash})
                    self.event_pipeline_manager = (
                        pipeline.setup_event_pipeline(self.conf))
                    self.event_pipeline_validated = True
                except Exception as err:
                    LOG.exception(_LE('Unable to load changed event pipeline:'
                                      ' %s') % err)

        if self.pipeline_validated or self.event_pipeline_validated:
            self.reload_pipeline()
            self.clear_pipeline_validation_status()
Example #38
    def __init__(self, app, conf):
        self.app = app
        self.logger = utils.get_logger(conf, log_route='ceilometer')

        self.metadata_headers = [
            h.strip().replace('-', '_').lower()
            for h in conf.get("metadata_headers", "").split(",") if h.strip()
        ]

        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline()
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
Example #39
    def __init__(self, app, conf):
        self.app = app

        self.metadata_headers = [h.strip().replace('-', '_').lower()
                                 for h in conf.get(
                                     "metadata_headers",
                                     "").split(",") if h.strip()]

        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer',
            ),
        )
Example #40
    def start(self):
        self.pipeline_manager = pipeline.setup_pipeline()

        self.partition_coordinator.start()
        self.join_partitioning_groups()

        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        for interval, task in six.iteritems(self.setup_polling_tasks()):
            self.tg.add_timer(interval,
                              self.interval_task,
                              initial_delay=interval if delay_start else None,
                              task=task)
        self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                          self.partition_coordinator.heartbeat)
Example #41
    def __init__(self, app, conf):
        self.app = app

        self.metadata_headers = [h.strip().replace('-', '_').lower()
                                 for h in conf.get(
                                     "metadata_headers",
                                     "").split(",") if h.strip()]

        service.prepare_service()
        publisher_manager = dispatch.NameDispatchExtensionManager(
            namespace=pipeline.PUBLISHER_NAMESPACE,
            check_func=lambda x: True,
            invoke_on_load=True,
        )

        self.pipeline_manager = pipeline.setup_pipeline(publisher_manager)
Example #42
    def start(self):
        self.pipeline_manager = pipeline.setup_pipeline()

        self.partition_coordinator.start()
        self.join_partitioning_groups()

        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        for interval, task in six.iteritems(self.setup_polling_tasks()):
            self.tg.add_timer(interval,
                              self.interval_task,
                              initial_delay=interval if delay_start else None,
                              task=task)
        self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                          self.partition_coordinator.heartbeat)
Example #43
    def __init__(self, app, conf):
        self.app = app
        self.logger = utils.get_logger(conf, log_route='ceilometer')

        self.metadata_headers = [
            h.strip().replace('-', '_').lower()
            for h in conf.get("metadata_headers", "").split(",") if h.strip()
        ]

        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer', ), )
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
Example #44
def send_sample():
    cfg.CONF.register_cli_opts(
        [
            cfg.StrOpt("sample-name", short="n", help="Meter name.", required=True),
            cfg.StrOpt(
                "sample-type", short="y", help="Meter type (gauge, delta, cumulative).", default="gauge", required=True
            ),
            cfg.StrOpt("sample-unit", short="U", help="Meter unit."),
            cfg.IntOpt("sample-volume", short="l", help="Meter volume value.", default=1),
            cfg.StrOpt("sample-resource", short="r", help="Meter resource id.", required=True),
            cfg.StrOpt("sample-user", short="u", help="Meter user id."),
            cfg.StrOpt("sample-project", short="p", help="Meter project id."),
            cfg.StrOpt("sample-timestamp", short="i", help="Meter timestamp.", default=timeutils.utcnow().isoformat()),
            cfg.StrOpt("sample-metadata", short="m", help="Meter metadata."),
        ]
    )

    service.prepare_service()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(message)s")
    console.setFormatter(formatter)
    root_logger = logging.getLogger("")
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    pipeline_manager = pipeline.setup_pipeline(transformer.TransformerExtensionManager("ceilometer.transformer"))

    with pipeline_manager.publisher(context.get_admin_context()) as p:
        p(
            [
                sample.Sample(
                    name=cfg.CONF.sample_name,
                    type=cfg.CONF.sample_type,
                    unit=cfg.CONF.sample_unit,
                    volume=cfg.CONF.sample_volume,
                    user_id=cfg.CONF.sample_user,
                    project_id=cfg.CONF.sample_project,
                    resource_id=cfg.CONF.sample_resource,
                    timestamp=cfg.CONF.sample_timestamp,
                    resource_metadata=cfg.CONF.sample_metadata and eval(cfg.CONF.sample_metadata),
                )
            ]
        )
Example #45
    def __init__(self, app, conf):
        self.app = app

        self.metadata_headers = [h.strip().replace('-', '_').lower()
                                 for h in conf.get(
                                     "metadata_headers",
                                     "").split(",") if h.strip()]

        self.logger = logging.getLogger('ceilometer')
        self.logger.setLevel(getattr(logging,
                                     conf.get('log_level', 'WARN').upper()))
        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline()
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
Example #46
    def initialize_service_hook(self, service):
        """Consumers must be declared before consume_thread start."""
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager("ceilometer.transformer")
        )

        LOG.debug(_("Loading event definitions"))
        self.event_converter = event_converter.setup_events(
            extension.ExtensionManager(namespace="ceilometer.event.trait_plugin")
        )

        self.notification_manager = extension.ExtensionManager(
            namespace=self.NOTIFICATION_NAMESPACE, invoke_on_load=True
        )

        if not list(self.notification_manager):
            LOG.warning(_("Failed to load any notification handlers for %s"), self.NOTIFICATION_NAMESPACE)
        self.notification_manager.map(self._setup_subscription)
Example #47
0
    def initialize_service_hook(self, service):
        '''Consumers must be declared before consume_thread start.'''
        self.pipeline_manager = pipeline.setup_pipeline(
            transformer.TransformerExtensionManager(
                'ceilometer.transformer',
            ),
        )

        self.notification_manager = \
            extension.ExtensionManager(
                namespace=self.NOTIFICATION_NAMESPACE,
                invoke_on_load=True,
            )

        if not list(self.notification_manager):
            LOG.warning('Failed to load any notification handlers for %s',
                        self.NOTIFICATION_NAMESPACE)
        self.notification_manager.map(self._setup_subscription)
Example #48
0
    def initialize_service_hook(self, service):
        '''Consumers must be declared before consume_thread start.'''
        self.pipeline_manager = pipeline.setup_pipeline()

        LOG.debug(_('Loading event definitions'))
        self.event_converter = event_converter.setup_events(
            extension.ExtensionManager(
                namespace='ceilometer.event.trait_plugin'))

        self.notification_manager = \
            extension.ExtensionManager(
                namespace=self.NOTIFICATION_NAMESPACE,
                invoke_on_load=True,
            )

        if not list(self.notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)
        self.notification_manager.map(self._setup_subscription)
Example #49
0
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()

        self.notification_manager = self._get_notifications_manager(
            self.pipeline_manager)
        if not list(self.notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints = [event_endpoint.EventsNotificationEndpoint()]

        targets = []
        for ext in self.notification_manager:
            handler = ext.obj
            LOG.debug(
                _('Event types from %(name)s: %(type)s'
                  ' (ack_on_error=%(error)s)') % {
                      'name': ext.name,
                      'type': ', '.join(handler.event_types),
                      'error': ack_on_error
                  })
            targets.extend(handler.get_targets(cfg.CONF))
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        self.listeners = []
        for url in urls:
            listener = messaging.get_notification_listener(
                targets, endpoints, url)
            listener.start()
            self.listeners.append(listener)

        # Add a dummy timer (604800 s = one week) so that wait() has
        # something to block on
        self.tg.add_timer(604800, lambda: None)
Example #50
0
    def start(self):
        self.pipeline_manager = publish_pipeline.setup_pipeline()

        self.partition_coordinator.start()
        self.join_partitioning_groups()

        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        # set shuffle time before polling task if necessary
        delay_polling_time = random.randint(
            0, cfg.CONF.shuffle_time_before_polling_task)

        for interval, task in six.iteritems(self.setup_polling_tasks()):
            delay_time = (interval + delay_polling_time if delay_start
                          else delay_polling_time)
            self.tg.add_timer(interval,
                              self.interval_task,
                              initial_delay=delay_time,
                              task=task)
        self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                          self.partition_coordinator.heartbeat)
Example #51
0
    def start(self):
        self.pipeline_manager = publish_pipeline.setup_pipeline()

        self.partition_coordinator.start()
        self.join_partitioning_groups()

        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        # set shuffle time before polling task if necessary
        delay_polling_time = random.randint(
            0, cfg.CONF.shuffle_time_before_polling_task)

        for interval, task in six.iteritems(self.setup_polling_tasks()):
            delay_time = (interval + delay_polling_time
                          if delay_start else delay_polling_time)
            self.tg.add_timer(interval,
                              self.interval_task,
                              initial_delay=delay_time,
                              task=task)
        self.tg.add_timer(cfg.CONF.coordination.heartbeat,
                          self.partition_coordinator.heartbeat)
Example #52
0
    def start(self):
        super(NotificationService, self).start()
        self.pipeline_manager = pipeline.setup_pipeline()

        self.notification_manager = self._get_notifications_manager(
            self.pipeline_manager)
        if not list(self.notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints = [event_endpoint.EventsNotificationEndpoint()]

        targets = []
        for ext in self.notification_manager:
            handler = ext.obj
            LOG.debug(_('Event types from %(name)s: %(type)s'
                        ' (ack_on_error=%(error)s)') %
                      {'name': ext.name,
                       'type': ', '.join(handler.event_types),
                       'error': ack_on_error})
            targets.extend(handler.get_targets(cfg.CONF))
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        self.listeners = []
        for url in urls:
            listener = messaging.get_notification_listener(targets,
                                                           endpoints,
                                                           url)
            listener.start()
            self.listeners.append(listener)

        # Add a dummy timer (604800 s = one week) so that wait() has
        # something to block on
        self.tg.add_timer(604800, lambda: None)
Example #53
0
    def __init__(self, app, conf):
        self.app = app
        self.publish_incoming_bytes = conf.get("publish_incoming_bytes", True)
        self.publish_outgoing_bytes = conf.get("publish_outgoing_bytes", True)
        self.publish_on_error = conf.get("publish_on_error", False)
        self.enable_filters = conf.get("enable_filters", True)
        self.error_on_status = [
            status.strip()
            for status in conf.get("error_on_status", '').split('\n')
        ]
        self.logger = utils.get_logger(conf, log_route='ceilometer')

        self.metadata_headers = [
            h.strip().replace('-', '_').lower()
            for h in conf.get("metadata_headers", "").split(",") if h.strip()
        ]

        service.prepare_service([])

        self.pipeline_manager = pipeline.setup_pipeline()
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
Example #54
0
def send_sample():
    cfg.CONF.register_cli_opts([
        cfg.StrOpt('sample-name',
                   short='n',
                   help='Meter name.',
                   required=True),
        cfg.StrOpt('sample-type',
                   short='y',
                   help='Meter type (gauge, delta, cumulative).',
                   default='gauge',
                   required=True),
        cfg.StrOpt('sample-unit',
                   short='U',
                   help='Meter unit.',
                   default=None),
        cfg.IntOpt('sample-volume',
                   short='l',
                   help='Meter volume value.',
                   default=1),
        cfg.StrOpt('sample-resource',
                   short='r',
                   help='Meter resource id.',
                   required=True),
        cfg.StrOpt('sample-user',
                   short='u',
                   help='Meter user id.'),
        cfg.StrOpt('sample-project',
                   short='p',
                   help='Meter project id.'),
        cfg.StrOpt('sample-timestamp',
                   short='i',
                   help='Meter timestamp.',
                   default=timeutils.utcnow().isoformat()),
        cfg.StrOpt('sample-metadata',
                   short='m',
                   help='Meter metadata.'),
    ])

    service.prepare_service()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    pipeline_manager = pipeline.setup_pipeline(
        transformer.TransformerExtensionManager(
            'ceilometer.transformer',
        ),
    )

    with pipeline_manager.publisher(context.get_admin_context()) as p:
        p([sample.Sample(
            name=cfg.CONF.sample_name,
            type=cfg.CONF.sample_type,
            unit=cfg.CONF.sample_unit,
            volume=cfg.CONF.sample_volume,
            user_id=cfg.CONF.sample_user,
            project_id=cfg.CONF.sample_project,
            resource_id=cfg.CONF.sample_resource,
            timestamp=cfg.CONF.sample_timestamp,
            resource_metadata=cfg.CONF.sample_metadata and eval(
                cfg.CONF.sample_metadata))])
Example #55
0
    def run(self):
        super(NotificationService, self).run()
        self.shutdown = False
        self.periodic = None
        self.partition_coordinator = None
        self.coord_lock = threading.Lock()

        self.listeners = []

        # NOTE(kbespalov): a single amqp host is used for the pipeline
        # queues, hence only one listener is required
        self.pipeline_listener = None

        self.pipeline_manager = pipeline.setup_pipeline(self.conf)

        self.event_pipeline_manager = pipeline.setup_event_pipeline(self.conf)

        self.transport = messaging.get_transport(self.conf)

        if self.conf.notification.workload_partitioning:
            self.group_id = self.NOTIFICATION_NAMESPACE
            self.partition_coordinator = coordination.PartitionCoordinator(
                self.conf)
            self.partition_coordinator.start()
        else:
            # FIXME(sileht): the endpoint uses the notification_topics option,
            # and it should not, because this is an oslo_messaging option,
            # not a ceilometer one. Until we have another way to get the
            # notification_topics, we must create a transport to ensure
            # the option has been registered by oslo_messaging.
            messaging.get_notifier(self.transport, '')
            self.group_id = None

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)
        self.event_pipe_manager = self._get_event_pipeline_manager(
            self.transport)

        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        if self.conf.notification.workload_partitioning:
            # join group after all manager set up is configured
            self.partition_coordinator.join_group(self.group_id)
            self.partition_coordinator.watch_group(self.group_id,
                                                   self._refresh_agent)

            @periodics.periodic(spacing=self.conf.coordination.heartbeat,
                                run_immediately=True)
            def heartbeat():
                self.partition_coordinator.heartbeat()

            @periodics.periodic(spacing=self.conf.coordination.check_watchers,
                                run_immediately=True)
            def run_watchers():
                self.partition_coordinator.run_watchers()

            self.periodic = periodics.PeriodicWorker.create(
                [],
                executor_factory=lambda: futures.ThreadPoolExecutor(
                    max_workers=10))
            self.periodic.add(heartbeat)
            self.periodic.add(run_watchers)

            utils.spawn_thread(self.periodic.start)

            # configure pipelines after all coordination is configured.
            with self.coord_lock:
                self._configure_pipeline_listener()

        if not self.conf.notification.disable_non_metric_meters:
            LOG.warning(
                _LW('Non-metric meters may be collected. It is highly '
                    'advisable to disable these meters using '
                    'ceilometer.conf or the pipeline.yaml'))

        self.init_pipeline_refresh()
Example #56
0
def main():

    service.prepare_service()

    print "I'm CW-AGENT!"

    LOG.info(
        "###########################################################################################"
    )
    LOG.info("\n")
    LOG.info("CW -> I'm CW-AGENT!")
    LOG.info("\n")
    LOG.info(
        "###########################################################################################\n"
    )

    global amqp_compute_ip
    amqp_compute_ip = cfg.CONF.cloudwave.amqp_compute_ip
    global exchange
    exchange = cfg.CONF.cloudwave.cw_exchange  #"CloudWave"
    global timeout_recon
    timeout_recon = float(cfg.CONF.cloudwave.timeout_recon)
    global heartbeat
    heartbeat = int(cfg.CONF.cloudwave.heartbeat)
    global rabbitmq_user
    rabbitmq_user = cfg.CONF.cloudwave.rabbitmq_user
    global rabbitmq_password
    rabbitmq_password = cfg.CONF.cloudwave.rabbitmq_password
    global monitoring_interval
    monitoring_interval = float(cfg.CONF.cloudwave.monitoring_interval)

    #PIPELINE INIT
    LOG.info(
        "###########################################################################################"
    )
    LOG.info("CW -> Ceilometer's pipelines initialization....")
    LOG.info(
        "###########################################################################################"
    )
    global pipeline_manager
    global meter_pipe
    global metering_context
    pipeline_manager = publish_pipeline.setup_pipeline()
    meter_pipe = pipeline_manager.pipelines[0]  #meter_source:meter_sink
    metering_context = context.RequestContext('admin', 'admin', is_admin=True)
    LOG.info(
        "###########################################################################################\n"
    )

    LOG.info("CW -> CW-AGENT PARAMETERS:")
    LOG.info('\tRabbitMQ broker: %s', amqp_compute_ip)
    LOG.info('\tRabbitMQ CloudWave Topic Exchange: %s', exchange)
    LOG.info('\tRabbitMQ user: %s', rabbitmq_user)
    LOG.info('\tRabbitMQ password: %s', rabbitmq_password)
    LOG.info('\tRabbitMQ Heartbeat time interval: %s seconds',
             float(heartbeat))
    LOG.info('\tInstances Monitoring time interval: %s seconds',
             monitoring_interval)
    LOG.info('\tReconnection to RabbitMQ broker time interval: %s seconds',
             timeout_recon)
    LOG.info('\tCeilometer pipeline: %s', meter_pipe)
    """
        print "CW -> CW-AGENT PARAMETERS:"
        print '\tRabbitMQ broker: %s', amqp_compute_ip
        print '\tRabbitMQ CloudWave Topic Exchange: %s', exchange
        print '\tRabbitMQ user: %s', rabbitmq_user
        print '\tRabbitMQ password: %s', rabbitmq_password
        print '\tRabbitMQ Heartbeat time interval: %s seconds', float(heartbeat)
        print '\tInstances Monitoring time interval: %s seconds', monitoring_interval
        print '\tReconnection to RabbitMQ broker time interval: %s seconds', timeout_recon
	"""

    global connection
    connection = ioloop_connect()

    try:
        # Loop so we can communicate with RabbitMQ
        connection.ioloop.start()

    except KeyboardInterrupt:
        # Gracefully close the connection
        connection.close()
        # Loop until we're fully closed, will stop on its own
        connection.ioloop.start()
Example #57
0
    def __init__(self):
        # this is done here as the cfg options are not available
        # when the file is imported.
        self.pipeline_manager = pipeline.setup_pipeline()
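To tie the examples together: once setup_pipeline() has returned a manager, samples are normally pushed through its publisher() context manager, exactly as Examples #44 and #54 do. The sketch below is a hypothetical helper built on that pattern, not code from any of the snippets; the import paths follow the oslo-incubator layout these snippets assume and may differ between Ceilometer releases, and publish_one_gauge is an invented name.

from ceilometer.openstack.common import context
from ceilometer.openstack.common import timeutils
from ceilometer import pipeline
from ceilometer import sample


def publish_one_gauge(name, volume, resource_id, unit=None):
    # Build the pipeline manager; depending on the release this may
    # instead require a TransformerExtensionManager argument, as in
    # Example #44.
    pipeline_manager = pipeline.setup_pipeline()

    # publisher() yields a callable that sends a list of samples through
    # every pipeline whose source matches the meter name.
    with pipeline_manager.publisher(context.get_admin_context()) as publish:
        publish([sample.Sample(
            name=name,
            type=sample.TYPE_GAUGE,
            unit=unit,
            volume=volume,
            user_id=None,
            project_id=None,
            resource_id=resource_id,
            timestamp=timeutils.utcnow().isoformat(),
            resource_metadata=None,
        )])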