Example #1
    def refresh_pipeline(self):
        mtime = pipeline.get_pipeline_mtime()
        if mtime > self.pipeline_mtime:
            LOG.info(_LI('Pipeline configuration file has been updated.'))

            self.pipeline_mtime = mtime
            _hash = pipeline.get_pipeline_hash()

            if _hash != self.pipeline_hash:
                LOG.info(_LI("Detected change in pipeline configuration."))

                try:
                    # Pipeline in the notification agent.
                    if hasattr(self, 'pipeline_manager'):
                        self.pipeline_manager = pipeline.setup_pipeline()
                    # Polling in the polling agent.
                    elif hasattr(self, 'polling_manager'):
                        self.polling_manager = pipeline.setup_polling()
                    LOG.debug("Pipeline has been refreshed. "
                              "old hash: %(old)s, new hash: %(new)s",
                              {'old': self.pipeline_hash,
                               'new': _hash})
                except Exception as err:
                    LOG.debug("Active pipeline config's hash is %s",
                              self.pipeline_hash)
                    LOG.exception(_LE('Unable to load changed pipeline: %s')
                                  % err)
                    return

                self.pipeline_hash = _hash
                self.reload_pipeline()
Example #2
def expirer():
    service.prepare_service()

    if cfg.CONF.database.metering_time_to_live > 0:
        LOG.debug(_("Clearing expired metering data"))
        storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering')
        storage_conn.clear_expired_metering_data(
            cfg.CONF.database.metering_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database metering time to live "
                     "is disabled"))

    if cfg.CONF.database.event_time_to_live > 0:
        LOG.debug(_("Clearing expired event data"))
        event_conn = storage.get_connection_from_config(cfg.CONF, 'event')
        event_conn.clear_expired_event_data(
            cfg.CONF.database.event_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database event time to live "
                     "is disabled"))

    if cfg.CONF.database.alarm_history_time_to_live > 0:
        LOG.debug("Clearing expired alarm history data")
        storage_conn = storage.get_connection_from_config(cfg.CONF, 'alarm')
        storage_conn.clear_expired_alarm_history_data(
            cfg.CONF.database.alarm_history_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database alarm history time to live "
                     "is disabled"))
Example #3
 def cfg_changed(self):
     """Returns hash of changed cfg else False."""
     mtime = self.get_cfg_mtime()
     if mtime > self.cfg_mtime:
         LOG.info(_LI('Configuration file has been updated.'))
         self.cfg_mtime = mtime
         _hash = self.get_cfg_hash()
         if _hash != self.cfg_hash:
             LOG.info(_LI("Detected change in configuration."))
             return _hash
     return False
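A usage sketch (the caller below is hypothetical, not from the project): the hash-or-False contract lets a caller commit the new hash only after a successful reload.

 def maybe_reload(self):
     # Hypothetical caller of cfg_changed(); names are illustrative.
     _hash = self.cfg_changed()
     if _hash:
         self.reload_cfg()      # hypothetical reload hook
         self.cfg_hash = _hash  # commit only after reloading succeeds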
Example #4
    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        """
        # Prevent database deadlocks from occurring by
        # using separate transaction for each delete
        session = self._engine_facade.get_session()
        with session.begin():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            sample_q = (session.query(models.Sample)
                        .filter(models.Sample.timestamp < end))
            rows = sample_q.delete()
            LOG.info(_LI("%d samples removed from database"), rows)

        if not cfg.CONF.sql_expire_samples_only:
            with session.begin():
                # remove Meter definitions with no matching samples
                (session.query(models.Meter)
                 .filter(~models.Meter.samples.any())
                 .delete(synchronize_session=False))

            with session.begin():
                resource_q = (session.query(models.Resource.internal_id)
                              .filter(~models.Resource.samples.any()))
                # mark resource with no matching samples for delete
                resource_q.update({models.Resource.metadata_hash: "delete_"
                                  + cast(models.Resource.internal_id,
                                         sa.String)},
                                  synchronize_session=False)

            # remove metadata of resources marked for delete
            for table in [models.MetaText, models.MetaBigInt,
                          models.MetaFloat, models.MetaBool]:
                with session.begin():
                    resource_q = (session.query(models.Resource.internal_id)
                                  .filter(models.Resource.metadata_hash
                                          .like('delete_%')))
                    resource_subq = resource_q.subquery()
                    (session.query(table)
                     .filter(table.id.in_(resource_subq))
                     .delete(synchronize_session=False))

            # remove resource marked for delete
            with session.begin():
                resource_q = (session.query(models.Resource.internal_id)
                              .filter(models.Resource.metadata_hash
                                      .like('delete_%')))
                resource_q.delete(synchronize_session=False)
            LOG.info(_LI("Expired residual resource and"
                         " meter definition data"))
Example #5
    def pipeline_changed(self, p_type=pipeline.SAMPLE_TYPE):
        """Returns hash of changed pipeline else False."""

        pipeline_mtime = self.get_pipeline_mtime(p_type)
        mtime = pipeline.get_pipeline_mtime(p_type)
        if mtime > pipeline_mtime:
            LOG.info(_LI('Pipeline configuration file has been updated.'))

            self.set_pipeline_mtime(mtime, p_type)
            _hash = pipeline.get_pipeline_hash(p_type)
            pipeline_hash = self.get_pipeline_hash(p_type)
            if _hash != pipeline_hash:
                LOG.info(_LI("Detected change in pipeline configuration."))
                return _hash
        return False
Example #6
    def clear_expired_event_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            event_q = (session.query(models.Event.id)
                       .filter(models.Event.generated < end))

            event_subq = event_q.subquery()
            for trait_model in [models.TraitText, models.TraitInt,
                                models.TraitFloat, models.TraitDatetime]:
                (session.query(trait_model)
                 .filter(trait_model.event_id.in_(event_subq))
                 .delete(synchronize_session="fetch"))
            event_rows = event_q.delete()

            # remove EventType and TraitType with no corresponding
            # matching events and traits
            (session.query(models.EventType)
             .filter(~models.EventType.events.any())
             .delete(synchronize_session="fetch"))
            LOG.info(_LI("%d events are removed from database"), event_rows)
Example #7
def setup_meters_config():
    """Set up the meter definitions from the yaml config file."""
    config_file = get_config_file()
    if config_file is not None:
        LOG.debug("Meter Definitions configuration file: %s", config_file)

        with open(config_file) as cf:
            config = cf.read()

        try:
            meters_config = yaml.safe_load(config)
        except yaml.YAMLError as err:
            if hasattr(err, "problem_mark"):
                mark = err.problem_mark
                errmsg = _LE(
                    "Invalid YAML syntax in Meter Definitions file " "%(file)s at line: %(line)s, column: %(column)s."
                ) % dict(file=config_file, line=mark.line + 1, column=mark.column + 1)
            else:
                errmsg = _LE("YAML error reading Meter Definitions file " "%(file)s") % dict(file=config_file)
            LOG.error(errmsg)
            raise

    else:
        LOG.debug("No Meter Definitions configuration file found!" " Using default config.")
        meters_config = {}

    LOG.info(_LI("Meter Definitions: %s"), meters_config)

    return meters_config
Example #8
 def leave_group(self, group_id):
     if group_id not in self._groups:
         return
     if self._coordinator:
         self._coordinator.leave_group(group_id)
         self._groups.remove(group_id)
         LOG.info(_LI('Left partitioning group %s'), group_id)
Example #9
    def reload_pipeline(self):
        LOG.info(_LI("Reloading notification agent and listeners."))

        if self.pipeline_validated:
            self.pipe_manager = self._get_pipe_manager(
                self.transport, self.pipeline_manager)

        if self.event_pipeline_validated:
            self.event_pipe_manager = self._get_event_pipeline_manager(
                self.transport)

        with self.coord_lock:
            if self.shutdown:
                # NOTE(sileht): We are going to shut down, so everything will
                # be stopped; we should not restart the listeners
                return

            # restart the main queue listeners.
            utils.kill_listeners(self.listeners)
            self._configure_main_queue_listeners(
                self.pipe_manager, self.event_pipe_manager)

            # restart the pipeline listeners if workload partitioning
            # is enabled.
            if cfg.CONF.notification.workload_partitioning:
                self._configure_pipeline_listener()
Example #10
    def __init__(self, parsed_url):
        self.kafka_client = None

        self.host, self.port = netutils.parse_host_port(
            parsed_url.netloc, default_port=9092)

        self.local_queue = []

        params = urlparse.parse_qs(parsed_url.query)
        self.topic = params.get('topic', ['ceilometer'])[-1]
        self.policy = params.get('policy', ['default'])[-1]
        self.max_queue_length = int(params.get(
            'max_queue_length', [1024])[-1])
        self.max_retry = int(params.get('max_retry', [100])[-1])

        if self.policy in ['default', 'drop', 'queue']:
            LOG.info(_LI('Publishing policy set to %s') % self.policy)
        else:
            LOG.warn(_LW('Publishing policy is unknown (%s) force to default')
                     % self.policy)
            self.policy = 'default'

        try:
            self._get_client()
        except Exception as e:
            LOG.exception(_LE("Failed to connect to Kafka service: %s"), e)
Example #11
    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        """
        LOG.info(_LI("Dropping metering data with TTL %d"), ttl)
Example #12
 def create_index(self, keys, name=None, *args, **kwargs):
     try:
         self.conn.create_index(keys, name=name, *args, **kwargs)
     except pymongo.errors.OperationFailure as e:
          if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
              LOG.info(_LI("Index %s will be recreated.") % name)
             self._recreate_index(keys, name, *args, **kwargs)
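Note the eager %-interpolation in the snippet above. Python's logging module can also defer formatting by passing the value as an argument, which skips the formatting work entirely when INFO is filtered out; both spellings appear throughout these examples:

# Eager: the message is formatted even if INFO is disabled.
LOG.info(_LI("Index %s will be recreated.") % name)
# Deferred: the logger interpolates only when the record is emitted.
LOG.info(_LI("Index %s will be recreated."), name)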
Example #13
    def record_events(self, event_models):
        """Write the events to database.

        :param event_models: a list of models.Event objects.
        """
        error = None
        for event_model in event_models:
            traits = []
            if event_model.traits:
                for trait in event_model.traits:
                    traits.append({'trait_name': trait.name,
                                   'trait_type': trait.dtype,
                                   'trait_value': trait.value})
            try:
                self.db.event.insert_one(
                    {'_id': event_model.message_id,
                     'event_type': event_model.event_type,
                     'timestamp': event_model.generated,
                     'traits': traits, 'raw': event_model.raw})
            except pymongo.errors.DuplicateKeyError as ex:
                LOG.info(_LI("Duplicate event detected, skipping it: %s") % ex)
            except Exception as ex:
                LOG.exception(_LE("Failed to record event: %s") % ex)
                error = ex
        if error:
            raise error
Example #14
    def record_events(self, events):

        def _build_bulk_index(event_list):
            for ev in event_list:
                traits = {t.name: t.value for t in ev.traits}
                yield {'_op_type': 'create',
                       '_index': '%s_%s' % (self.index_name,
                                            ev.generated.date().isoformat()),
                       '_type': ev.event_type,
                       '_id': ev.message_id,
                       '_source': {'timestamp': ev.generated.isoformat(),
                                   'traits': traits,
                                   'raw': ev.raw}}

        error = None
        for ok, result in helpers.streaming_bulk(
                self.conn, _build_bulk_index(events)):
            if not ok:
                __, result = result.popitem()
                if result['status'] == 409:
                    LOG.info(_LI('Duplicate event detected, skipping it: %s'),
                             result)
                else:
                    LOG.exception(_LE('Failed to record event: %s'), result)
                    error = storage.StorageUnknownWriteError(result)

        if self._refresh_on_write:
            self.conn.indices.refresh(index='%s_*' % self.index_name)
            while self.conn.cluster.pending_tasks(local=True)['tasks']:
                pass
        if error:
            raise error
Example #15
def setup_meters_config():
    """Load the meter definitions from the yaml config file."""
    config_file = get_config_file()

    LOG.debug("Hardware snmp meter definition file: %s" % config_file)
    with open(config_file) as cf:
        config = cf.read()

    try:
        meters_config = yaml.safe_load(config)
    except yaml.YAMLError as err:
        if hasattr(err, 'problem_mark'):
            mark = err.problem_mark
            errmsg = (_LE("Invalid YAML syntax in Meter Definitions file "
                      "%(file)s at line: %(line)s, column: %(column)s.")
                      % dict(file=config_file,
                             line=mark.line + 1,
                             column=mark.column + 1))
        else:
            errmsg = (_LE("YAML error reading Meter Definitions file "
                      "%(file)s")
                      % dict(file=config_file))
        LOG.error(errmsg)
        raise

    LOG.info(_LI("Meter Definitions: %s") % meters_config)

    return meters_config
Example #16
 def _evaluate_assigned_alarms(self):
     try:
         alarms = self._assigned_alarms()
         LOG.info(_LI('initiating evaluation cycle on %d alarms') %
                  len(alarms))
         for alarm in alarms:
             self._evaluate_alarm(alarm)
     except Exception:
         LOG.exception(_('alarm evaluation cycle failed'))
Example #17
def enforce_limit(limit):
    """Ensure limit is defined and valid. If not, set a default."""
    if limit is None:
        limit = cfg.CONF.api.default_api_return_limit
        LOG.info(_LI('No limit value provided, result set will be'
                     ' limited to %(limit)d.'), {'limit': limit})
    if not limit or limit <= 0:
        raise base.ClientSideError(_("Limit must be positive"))
    return limit
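A hedged sketch of the three paths through enforce_limit (the configured default is deployment-specific):

enforce_limit(100)    # valid: returned unchanged
enforce_limit(None)   # falls back to cfg.CONF.api.default_api_return_limit
enforce_limit(-1)     # raises ClientSideError("Limit must be positive")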
Example #18
def build_server():
    app = load_app()
    # Create the WSGI server and start it
    host, port = cfg.CONF.api.host, cfg.CONF.api.port

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.info(_LI("Configuration:"))
    cfg.CONF.log_opt_values(LOG, log.INFO)

    if host == '0.0.0.0':
        LOG.info(_LI(
            'serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s')
            % ({'sport': port, 'vport': port}))
    else:
        LOG.info(_LI("serving on http://%(host)s:%(port)s") % (
                 {'host': host, 'port': port}))

    serving.run_simple(cfg.CONF.api.host, cfg.CONF.api.port,
                       app, processes=cfg.CONF.api.workers)
Example #19
 def start(self):
     backend_url = cfg.CONF.coordination.backend_url
     if backend_url:
         try:
             self._coordinator = tooz.coordination.get_coordinator(
                 backend_url, self._my_id)
             self._coordinator.start()
             LOG.info(_LI('Coordination backend started successfully.'))
         except tooz.coordination.ToozError:
             LOG.exception(_LE('Error connecting to coordination backend.'))
Example #20
    def clear_expired_alarm_history_data(self, alarm_history_ttl):
        """Clear expired alarm history data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param alarm_history_ttl: Number of seconds to keep alarm history
                                  records for.
        """
        LOG.info(_LI('Dropping alarm history data with TTL %d'),
                 alarm_history_ttl)
Example #21
    def process_sample_for_monasca(self, sample_obj):
        if not self._mapping:
            raise NoMappingsFound("Unable to process the sample")

        dimensions = {}
        if isinstance(sample_obj, sample_util.Sample):
            sample = sample_obj.as_dict()
        elif isinstance(sample_obj, dict):
            if 'counter_name' in sample_obj:
                sample = self._convert_to_sample(sample_obj)
            else:
                sample = sample_obj

        for dim in self._mapping['dimensions']:
            val = sample.get(dim, None)
            if val:
                dimensions[dim] = val

        sample_meta = sample.get('resource_metadata', None)
        value_meta = {}

        meter_name = sample.get('name') or sample.get('counter_name')
        if sample_meta:
            for meta_key in self._mapping['metadata']['common']:
                val = sample_meta.get(meta_key, None)
                if val:
                    value_meta[meta_key] = val

            if meter_name in self._mapping['metadata'].keys():
                for meta_key in self._mapping['metadata'][meter_name]:
                    val = sample_meta.get(meta_key, None)
                    if val:
                        value_meta[meta_key] = val

        meter_value = sample.get('volume') or sample.get('counter_volume')
        if meter_value is None:
            meter_value = 0

        metric = dict(
            name=meter_name,
            timestamp=self._convert_timestamp(sample['timestamp']),
            value=meter_value,
            dimensions=dimensions,
            value_meta=value_meta,
        )

        LOG.debug(_LI("Generated metric with name %(name)s,"
                      " timestamp %(timestamp)s, value %(value)s,"
                      " dimensions %(dimensions)s") %
                  {'name': metric['name'],
                   'timestamp': metric['timestamp'],
                   'value': metric['value'],
                   'dimensions': metric['dimensions']})

        return metric
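A hedged example of the _mapping structure this method expects, inferred from the lookups above (field names illustrative):

_mapping = {
    'dimensions': ['resource_id', 'project_id', 'user_id'],
    'metadata': {
        'common': ['event_type'],   # metadata copied for every meter
        'image': ['size'],          # extra keys for the 'image' meter only
    },
}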
Example #22
def expirer():
    conf = service.prepare_service()

    if conf.database.metering_time_to_live > 0:
        LOG.debug("Clearing expired metering data")
        storage_conn = storage.get_connection_from_config(conf, 'metering')
        storage_conn.clear_expired_metering_data(
            conf.database.metering_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database metering time to live "
                     "is disabled"))

    if conf.database.event_time_to_live > 0:
        LOG.debug("Clearing expired event data")
        event_conn = storage.get_connection_from_config(conf, 'event')
        event_conn.clear_expired_event_data(
            conf.database.event_time_to_live)
    else:
        LOG.info(_LI("Nothing to clean, database event time to live "
                     "is disabled"))
Example #23
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter.
        """
        LOG.info(_LI('metering data %(counter_name)s for %(resource_id)s: '
                     '%(counter_volume)s')
                 % ({'counter_name': data['counter_name'],
                     'resource_id': data['resource_id'],
                     'counter_volume': data['counter_volume']}))
Example #24
 def notify(action, alarm_id, alarm_name, severity, previous, current,
            reason, reason_data):
     LOG.info(_LI(
         "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s "
         "priority from %(previous)s to %(current)s with action %(action)s"
         " because %(reason)s.") % ({'alarm_name': alarm_name,
                                     'alarm_id': alarm_id,
                                     'severity': severity,
                                     'previous': previous,
                                     'current': current,
                                     'action': action,
                                     'reason': reason}))
Example #25
File: base.py  Project: r-mibu/ceilometer
    def reload_pipeline(self):
        LOG.info(_LI("Reconfiguring polling tasks."))

        # stop existing pollsters and leave partitioning groups
        self.stop_pollsters()
        for group in self.groups:
            self.partition_coordinator.leave_group(group)

        # re-create partitioning groups according to pipeline
        # and configure polling tasks with latest pipeline conf
        self.join_partitioning_groups()
        self.pollster_timers = self.configure_polling_tasks()
Example #26
 def _get_endpoint(conf, ksclient):
     # we store the endpoint as a base class attribute, so keystone is
     # only ever called once
     if _Base._ENDPOINT is None:
         try:
             creds = conf.service_credentials
             _Base._ENDPOINT = keystone_client.get_service_catalog(ksclient).url_for(
                 service_type=conf.service_types.swift, interface=creds.interface, region_name=creds.region_name
             )
         except exceptions.EndpointNotFound as e:
             LOG.info(_LI("Swift endpoint not found: %s"), e)
     return _Base._ENDPOINT
Example #27
def _setup_polling_manager(cfg_file):
    if not os.path.exists(cfg_file):
        cfg_file = cfg.CONF.find_file(cfg_file)

    LOG.debug("Polling config file: %s", cfg_file)

    with open(cfg_file) as fap:
        data = fap.read()

    pipeline_cfg = yaml.safe_load(data)
    LOG.info(_LI("Pipeline config: %s"), pipeline_cfg)

    return PollingManager(pipeline_cfg)
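After yaml.safe_load, pipeline_cfg is a plain dict. A minimal, hedged example of the decoupled sources/sinks format (consistent with the docstring in Example #59; values illustrative):

pipeline_cfg = {
    'sources': [{'name': 'meter_source',
                 'interval': 600,
                 'meters': ['*'],
                 'sinks': ['meter_sink']}],
    'sinks': [{'name': 'meter_sink',
               'transformers': [],
               'publishers': ['notifier://']}],
}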
Example #28
def expirer():
    service.prepare_service()

    if cfg.CONF.database.metering_time_to_live > 0:
        LOG.debug("Clearing expired metering data")
        storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering')
        storage_conn.clear_expired_metering_data(
            cfg.CONF.database.metering_time_to_live)
    else:
        LOG.info(
            _LI("Nothing to clean, database metering time to live "
                "is disabled"))

    if cfg.CONF.database.event_time_to_live > 0:
        LOG.debug("Clearing expired event data")
        event_conn = storage.get_connection_from_config(cfg.CONF, 'event')
        event_conn.clear_expired_event_data(
            cfg.CONF.database.event_time_to_live)
    else:
        LOG.info(
            _LI("Nothing to clean, database event time to live "
                "is disabled"))
Example #29
    def reload_pipeline(self):
        if self.pipeline_validated:
            LOG.info(_LI("Reconfiguring polling tasks."))

            # stop existing pollsters and leave partitioning groups
            self.stop_pollsters()
            for group in self.groups:
                self.partition_coordinator.leave_group(group)

            # re-create partitioning groups according to pipeline
            # and configure polling tasks with latest pipeline conf
            self.join_partitioning_groups()
            self.pollster_timers = self.configure_polling_tasks()
Example #30
 def _get_endpoint(conf, ksclient):
     # we store the endpoint as a base class attribute, so keystone is
     # only ever called once
     if _Base._ENDPOINT is None:
         try:
             creds = conf.service_credentials
             _Base._ENDPOINT = keystone_client.get_service_catalog(
                 ksclient).url_for(service_type=conf.service_types.swift,
                                   interface=creds.interface,
                                   region_name=creds.region_name)
         except exceptions.EndpointNotFound as e:
             LOG.info(_LI("Swift endpoint not found: %s"), e)
     return _Base._ENDPOINT
Example #31
def _setup_polling_manager(cfg_file):
    if not os.path.exists(cfg_file):
        cfg_file = cfg.CONF.find_file(cfg_file)

    LOG.debug("Polling config file: %s", cfg_file)

    with open(cfg_file) as fap:
        data = fap.read()

    pipeline_cfg = yaml.safe_load(data)
    LOG.info(_LI("Pipeline config: %s"), pipeline_cfg)

    return PollingManager(pipeline_cfg)
Example #32
    def process_sample_for_monasca(self, sample_obj):
        if not self._mapping:
            raise NoMappingsFound("Unable to process the sample")

        dimensions = {}
        if isinstance(sample_obj, sample_util.Sample):
            sample = sample_obj.as_dict()
        elif isinstance(sample_obj, dict):
            if 'counter_name' in sample_obj:
                sample = self._convert_to_sample(sample_obj)
            else:
                sample = sample_obj

        for dim in self._mapping['dimensions']:
            val = sample.get(dim, None)
            if val:
                dimensions[dim] = val

        sample_meta = sample.get('resource_metadata', None)
        value_meta = {}

        meter_name = sample.get('name') or sample.get('counter_name')
        if sample_meta:
            for meta_key in self._mapping['metadata']['common']:
                val = sample_meta.get(meta_key, None)
                if val:
                    value_meta[meta_key] = val

            if meter_name in self._mapping['metadata'].keys():
                for meta_key in self._mapping['metadata'][meter_name]:
                    val = sample_meta.get(meta_key, None)
                    if val:
                        value_meta[meta_key] = val

        metric = dict(
            name=meter_name,
            timestamp=self._convert_timestamp(sample['timestamp']),
            value=sample.get('volume') or sample.get('counter_volume'),
            dimensions=dimensions,
            value_meta=value_meta if value_meta else None,
        )

        LOG.debug(_LI("Generated metric with name %(name)s,"
                      " timestamp %(timestamp)s, value %(value)s,"
                      " dimensions %(dimensions)s") %
                  {'name': metric['name'],
                   'timestamp': metric['timestamp'],
                   'value': metric['value'],
                   'dimensions': metric['dimensions']})

        return metric
Example #33
 def notify(action, alarm_id, alarm_name, severity, previous, current,
            reason, reason_data):
     LOG.info(
         _LI("Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s "
             "priority from %(previous)s to %(current)s with action %(action)s"
             " because %(reason)s.") % ({
                 'alarm_name': alarm_name,
                 'alarm_id': alarm_id,
                 'severity': severity,
                 'previous': previous,
                 'current': current,
                 'action': action,
                 'reason': reason
             }))
Example #34
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter.
        """
        LOG.info(
            _LI('metering data %(counter_name)s for %(resource_id)s: '
                '%(counter_volume)s') %
            ({
                'counter_name': data['counter_name'],
                'resource_id': data['resource_id'],
                'counter_volume': data['counter_volume']
            }))
Example #35
    def reload_pipeline(self):
        LOG.info(_LI("Reloading notification agent and listeners."))

        self.pipe_manager = self._get_pipe_manager(
            self.transport, self.pipeline_manager)

        # re-start the main queue listeners.
        utils.kill_listeners(self.listeners)
        self._configure_main_queue_listeners(
            self.pipe_manager, self.event_pipe_manager)

        # re-start the pipeline listeners if workload partitioning
        # is enabled.
        if cfg.CONF.notification.workload_partitioning:
            self._refresh_agent(None)
Example #36
    def _setup_transformers(self, cfg, transformer_manager):
        transformers = []
        for transformer in self.transformer_cfg:
            parameter = transformer["parameters"] or {}
            try:
                ext = transformer_manager[transformer["name"]]
            except KeyError:
                raise PipelineException("No transformer named %s loaded" % transformer["name"], cfg)
            transformers.append(ext.plugin(**parameter))
            LOG.info(
                _LI("Pipeline %(pipeline)s: Setup transformer instance %(name)s " "with parameter %(param)s")
                % ({"pipeline": self, "name": transformer["name"], "param": parameter})
            )

        return transformers
Example #37
def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE):
    if not os.path.exists(cfg_file):
        cfg_file = cfg.CONF.find_file(cfg_file)

    LOG.debug("Pipeline config file: %s", cfg_file)

    with open(cfg_file) as fap:
        data = fap.read()

    pipeline_cfg = yaml.safe_load(data)
    LOG.info(_LI("Pipeline config: %s"), pipeline_cfg)

    return PipelineManager(
        pipeline_cfg, transformer_manager
        or extension.ExtensionManager('ceilometer.transformer', ), p_type)
Example #38
def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE):
    if not os.path.exists(cfg_file):
        cfg_file = cfg.CONF.find_file(cfg_file)

    LOG.debug("Pipeline config file: %s", cfg_file)

    with open(cfg_file) as fap:
        data = fap.read()

    pipeline_cfg = yaml.safe_load(data)
    LOG.info(_LI("Pipeline config: %s"), pipeline_cfg)

    return PipelineManager(
        pipeline_cfg, transformer_manager or extension.ExtensionManager("ceilometer.transformer"), p_type
    )
Example #39
    def reload_pipeline(self):
        LOG.info(_LI("Reloading notification agent and listeners."))

        self.pipe_manager = self._get_pipe_manager(self.transport,
                                                   self.pipeline_manager)

        # re-start the main queue listeners.
        utils.kill_listeners(self.listeners)
        self._configure_main_queue_listeners(self.pipe_manager,
                                             self.event_pipe_manager)

        # re-start the pipeline listeners if workload partitioning
        # is enabled.
        if cfg.CONF.notification.workload_partitioning:
            self._refresh_agent(None)
Example #40
    def clear_expired_alarm_history_data(self, alarm_history_ttl):
        """Clear expired alarm history data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param alarm_history_ttl: Number of seconds to keep alarm history
                                  records for.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            valid_start = (timeutils.utcnow() -
                           datetime.timedelta(seconds=alarm_history_ttl))
            deleted_rows = (session.query(models.AlarmChange).filter(
                models.AlarmChange.timestamp < valid_start).delete())
            LOG.info(_LI("%d alarm histories are removed from database"),
                     deleted_rows)
Example #41
 def join_group(self, group_id):
     if not self._coordinator or not self._started or not group_id:
         return
     while True:
         try:
             join_req = self._coordinator.join_group(group_id)
             join_req.get()
             LOG.info(_LI('Joined partitioning group %s'), group_id)
             break
         except tooz.coordination.MemberAlreadyExist:
             return
         except tooz.coordination.GroupNotCreated:
             create_grp_req = self._coordinator.create_group(group_id)
             try:
                 create_grp_req.get()
             except tooz.coordination.GroupAlreadyExist:
                 pass
     self._groups.add(group_id)
Example #42
    def _setup_transformers(self, cfg, transformer_manager):
        transformers = []
        for transformer in self.transformer_cfg:
            parameter = transformer['parameters'] or {}
            try:
                ext = transformer_manager[transformer['name']]
            except KeyError:
                raise PipelineException(
                    "No transformer named %s loaded" % transformer['name'],
                    cfg)
            transformers.append(ext.plugin(**parameter))
            LOG.info(_LI(
                "Pipeline %(pipeline)s: Setup transformer instance %(name)s "
                "with parameter %(param)s") % ({'pipeline': self,
                                                'name': transformer['name'],
                                                'param': parameter}))

        return transformers
Example #43
    def connect(self, conf, url):
        connection_options = pymongo.uri_parser.parse_uri(url)
        del connection_options['database']
        del connection_options['username']
        del connection_options['password']
        del connection_options['collection']
        pool_key = tuple(connection_options)

        if pool_key in self._pool:
            client = self._pool.get(pool_key)()
            if client:
                return client
        splitted_url = netutils.urlsplit(url)
        log_data = {'db': splitted_url.scheme,
                    'nodelist': connection_options['nodelist']}
        LOG.info(_LI('Connecting to %(db)s on %(nodelist)s') % log_data)
        client = self._mongo_connect(conf, url)
        self._pool[pool_key] = weakref.ref(client)
        return client
Example #44
 def _inner():
     try:
         join_req = self._coordinator.join_group(group_id)
         join_req.get()
         LOG.info(_LI('Joined partitioning group %s'), group_id)
     except tooz.coordination.MemberAlreadyExist:
         return
     except tooz.coordination.GroupNotCreated:
         create_grp_req = self._coordinator.create_group(group_id)
         try:
             create_grp_req.get()
         except tooz.coordination.GroupAlreadyExist:
             pass
         raise ErrorJoiningPartitioningGroup()
     except tooz.coordination.ToozError:
         LOG.exception(_LE('Error joining partitioning group %s,'
                           ' re-trying'), group_id)
         raise ErrorJoiningPartitioningGroup()
     self._groups.add(group_id)
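The raises above are presumably consumed by a retry wrapper around _inner; a minimal illustrative loop (not the project's actual wrapper):

import time

while True:
    try:
        _inner()
        break
    except ErrorJoiningPartitioningGroup:
        time.sleep(1)  # back off, then re-try joining the group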
Example #45
    def record_events(self, event_models):
        """Write the events to SQL database via sqlalchemy.

        :param event_models: a list of model.Event objects.
        """
        session = self._engine_facade.get_session()
        error = None
        for event_model in event_models:
            event = None
            try:
                with session.begin():
                    event_type = self._get_or_create_event_type(
                        event_model.event_type, session=session)
                    event = models.Event(event_model.message_id, event_type,
                                         event_model.generated,
                                         event_model.raw)
                    session.add(event)
                    session.flush()

                    if event_model.traits:
                        trait_map = {}
                        for trait in event_model.traits:
                            if trait_map.get(trait.dtype) is None:
                                trait_map[trait.dtype] = []
                            trait_map[trait.dtype].append({
                                'event_id': event.id,
                                'key': trait.name,
                                'value': trait.value
                            })
                        for dtype in trait_map.keys():
                            model = TRAIT_ID_TO_MODEL[dtype]
                            session.execute(model.__table__.insert(),
                                            trait_map[dtype])
            except dbexc.DBDuplicateEntry as e:
                LOG.info(_LI("Duplicate event detected, skipping it: %s") % e)
            except KeyError as e:
                LOG.exception(_LE('Failed to record event: %s') % e)
            except Exception as e:
                LOG.exception(_LE('Failed to record event: %s') % e)
                error = e
        if error:
            raise error
Example #46
    def __init__(self, cfg):
        """Set up the polling according to config.

        The configuration is the sources half of the Pipeline Config.
        """
        self.sources = []
        if not ('sources' in cfg and 'sinks' in cfg):
            raise PipelineException("Both sources & sinks are required", cfg)
        LOG.info(_LI('detected decoupled pipeline config format'))

        unique_names = set()
        for s in cfg.get('sources', []):
            name = s.get('name')
            if name in unique_names:
                raise PipelineException("Duplicated source names: %s" % name,
                                        self)
            else:
                unique_names.add(name)
                self.sources.append(SampleSource(s))
        unique_names.clear()
Example #47
    def _refresh(self, alarm, state, reason, reason_data):
        """Refresh alarm state."""
        try:
            previous = alarm.state
            if previous != state:
                LOG.info(
                    _LI('alarm %(id)s transitioning to %(state)s because '
                        '%(reason)s') % {
                            'id': alarm.alarm_id,
                            'state': state,
                            'reason': reason
                        })

                self._client.alarms.set_state(alarm.alarm_id, state=state)
            alarm.state = state
            if self.notifier:
                self.notifier.notify(alarm, previous, reason, reason_data)
        except Exception:
            # retry will occur naturally on the next evaluation
            # cycle (unless alarm state reverts in the meantime)
            LOG.exception(_('alarm state update failed'))
Example #48
    def check_alarm_actions(alarm):
        actions_schema = ceilometer_alarm.NOTIFIER_SCHEMAS
        max_actions = cfg.CONF.alarm.alarm_max_actions
        for state in state_kind:
            actions_name = state.replace(" ", "_") + '_actions'
            actions = getattr(alarm, actions_name)
            if not actions:
                continue

            action_set = set(actions)
            if len(actions) != len(action_set):
                LOG.info(
                    _LI('duplicate actions are found: %s, '
                        'remove duplicate ones') % actions)
                actions = list(action_set)
                setattr(alarm, actions_name, actions)

            if 0 < max_actions < len(actions):
                error = _('%(name)s count exceeds maximum value '
                          '%(maximum)d') % {
                              "name": actions_name,
                              "maximum": max_actions
                          }
                raise base.ClientSideError(error)

            limited = rbac.get_limited_to_project(pecan.request.headers)

            for action in actions:
                try:
                    url = netutils.urlsplit(action)
                except Exception:
                    error = _("Unable to parse action %s") % action
                    raise base.ClientSideError(error)
                if url.scheme not in actions_schema:
                    error = _("Unsupported action %s") % action
                    raise base.ClientSideError(error)
                if limited and url.scheme in ('log', 'test'):
                    error = _('You are not authorized to create '
                              'action: %s') % action
                    raise base.ClientSideError(error, status_code=401)
Example #49
    def record_events(self, events):
        def _build_bulk_index(event_list):
            for ev in event_list:
                traits = {t.name: t.value for t in ev.traits}
                yield {'_op_type': 'create',
                       '_index': '%s_%s' % (self.index_name,
                                            ev.generated.date().isoformat()),
                       '_type': ev.event_type,
                       '_id': ev.message_id,
                       '_source': {'timestamp': ev.generated.isoformat(),
                                   'traits': traits,
                                   'raw': ev.raw}}

        error = None
        for ok, result in helpers.streaming_bulk(self.conn,
                                                 _build_bulk_index(events)):
            if not ok:
                __, result = result.popitem()
                if result['status'] == 409:
                    LOG.info(
                        _LI('Duplicate event detected, skipping it: %s') %
                        result)
                else:
                    LOG.exception(_LE('Failed to record event: %s') % result)
                    error = storage.StorageUnknownWriteError(result)

        if self._refresh_on_write:
            self.conn.indices.refresh(index='%s_*' % self.index_name)
            while self.conn.cluster.pending_tasks(local=True)['tasks']:
                pass
        if error:
            raise error
Example #50
def load_definitions(defaults, config_file, fallback_file=None):
    """Set up definitions from a yaml config file."""

    if not os.path.exists(config_file):
        config_file = cfg.CONF.find_file(config_file)
    if not config_file and fallback_file is not None:
        LOG.debug("No Definitions configuration file found!"
                  " Using default config.")
        config_file = fallback_file

    if config_file is not None:
        LOG.debug("Loading definitions configuration file: %s", config_file)

        with open(config_file) as cf:
            config = cf.read()

        try:
            definition_cfg = yaml.safe_load(config)
        except yaml.YAMLError as err:
            if hasattr(err, 'problem_mark'):
                mark = err.problem_mark
                errmsg = (
                    _("Invalid YAML syntax in Definitions file "
                      "%(file)s at line: %(line)s, column: %(column)s.") %
                    dict(file=config_file,
                         line=mark.line + 1,
                         column=mark.column + 1))
            else:
                errmsg = (_("YAML error reading Definitions file "
                            "%(file)s") % dict(file=config_file))
            LOG.error(errmsg)
            raise

    else:
        LOG.debug("No Definitions configuration file found!"
                  " Using default config.")
        definition_cfg = defaults

    LOG.info(_LI("Definitions: %s"), definition_cfg)
    return definition_cfg
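A hedged usage sketch (file names illustrative):

defaults = {'resources': []}
definition_cfg = load_definitions(
    defaults, 'my_definitions.yaml',
    fallback_file='/etc/ceilometer/my_definitions.yaml')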
Example #51
 def get_samples(self, manager, cache, resources):
     for endpoint in resources:
         for ip in self._iter_floating_ips(manager.keystone, cache,
                                           endpoint):
             LOG.info(_LI("FLOATING IP USAGE: %s") % ip.ip)
             # FIXME (flwang) The Nova API /os-floating-ips can't currently
             # provide the attributes Ceilometer used, such as project id
             # and host. Their usage is removed temporarily here and will
             # be restored once Nova bug 1174802 is fixed.
             yield sample.Sample(name='ip.floating',
                                 type=sample.TYPE_GAUGE,
                                 unit='ip',
                                 volume=1,
                                 user_id=None,
                                 project_id=None,
                                 resource_id=ip.id,
                                 timestamp=timeutils.utcnow().isoformat(),
                                 resource_metadata={
                                     'address': ip.ip,
                                     'pool': ip.pool
                                 })
Example #52
    def __init__(self, parsed_url):
        options = urlparse.parse_qs(parsed_url.query)
        # The value of each option is a list of URL parameter values;
        # only the last one is honored if the option is provided
        # more than once.
        self.per_meter_topic = bool(int(
            options.get('per_meter_topic', [0])[-1]))

        self.policy = options.get('policy', ['default'])[-1]
        self.max_queue_length = int(options.get(
            'max_queue_length', [1024])[-1])
        self.max_retry = 0

        self.local_queue = []

        if self.policy in ['default', 'queue', 'drop']:
            LOG.info(_LI('Publishing policy set to %s') % self.policy)
        else:
            LOG.warn(_('Publishing policy is unknown (%s) force to default')
                     % self.policy)
            self.policy = 'default'

        self.retry = 1 if self.policy in ['queue', 'drop'] else None
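A short illustration of the parse_qs convention used above, written against the stdlib spelling of the urlparse alias (URL illustrative):

from urllib import parse as urlparse

parsed_url = urlparse.urlparse(
    'notifier://host:5672/?policy=queue&policy=drop&max_queue_length=512')
options = urlparse.parse_qs(parsed_url.query)
# Repeated keys accumulate into lists; [-1] keeps the last occurrence.
assert options.get('policy', ['default'])[-1] == 'drop'
assert int(options.get('max_queue_length', [1024])[-1]) == 512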
Example #53
def load_app(conf):
    global APPCONFIGS

    # Build the WSGI app
    cfg_file = None
    cfg_path = conf.api_paste_config
    if not os.path.isabs(cfg_path):
        cfg_file = conf.find_file(cfg_path)
    elif os.path.exists(cfg_path):
        cfg_file = cfg_path

    if not cfg_file:
        raise cfg.ConfigFilesNotFoundError([conf.api_paste_config])

    configkey = str(uuid.uuid4())
    APPCONFIGS[configkey] = conf

    LOG.info(_LI("Full WSGI config used: %s"), cfg_file)
    LOG.warning(
        _LW("Note: Ceilometer API is deprecated; use APIs from Aodh"
            " (alarms), Gnocchi (metrics) and/or Panko (events)."))
    return deploy.loadapp("config:" + cfg_file,
                          global_conf={'configkey': configkey})
Example #54
def setup_events(trait_plugin_mgr):
    """Set up the event definitions from the yaml config file."""
    config_file = get_config_file()
    if config_file is not None:
        LOG.debug("Event Definitions configuration file: %s", config_file)

        with open(config_file) as cf:
            config = cf.read()

        try:
            events_config = yaml.safe_load(config)
        except yaml.YAMLError as err:
            if hasattr(err, 'problem_mark'):
                mark = err.problem_mark
                errmsg = (
                    _("Invalid YAML syntax in Event Definitions file "
                      "%(file)s at line: %(line)s, column: %(column)s.") %
                    dict(file=config_file,
                         line=mark.line + 1,
                         column=mark.column + 1))
            else:
                errmsg = (_("YAML error reading Event Definitions file "
                            "%(file)s") % dict(file=config_file))
            LOG.error(errmsg)
            raise

    else:
        LOG.debug("No Event Definitions configuration file found!"
                  " Using default config.")
        events_config = []

    LOG.info(_LI("Event Definitions: %s"), events_config)

    allow_drop = cfg.CONF.event.drop_unmatched_notifications
    return NotificationEventsConverter(events_config,
                                       trait_plugin_mgr,
                                       add_catchall=not allow_drop)
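A hedged example of what yaml.safe_load returns for a minimal Event Definitions file (schema inferred from usage; values illustrative):

events_config = [
    {'event_type': 'compute.instance.create.end',
     'traits': {'instance_id': {'fields': 'payload.instance_id'}}},
]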
Example #55
 def start(self):
     try:
         self._coordinator.start(start_heart=True)
         LOG.info(_LI('Coordination backend started successfully.'))
     except tooz.coordination.ToozError:
         LOG.exception(_LE('Error connecting to coordination backend.'))
Example #56
def db_clean_legacy():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt('confirm-drop-alarm-table',
                   short='n',
                   help='confirm to drop the legacy alarm tables')
    ])
    if not conf.confirm_drop_alarm_table:
        confirm = moves.input("Do you really want to drop the legacy alarm "
                              "tables? This will permanently destroy data "
                              "if they exist. Please type 'YES' to confirm: ")
        if confirm != 'YES':
            print("DB legacy cleanup aborted!")
            return

    service.prepare_service(conf=conf)
    for purpose in ['metering', 'event']:
        url = (getattr(conf.database, '%s_connection' % purpose)
               or conf.database.connection)
        parsed = urlparse.urlparse(url)

        if parsed.password:
            masked_netloc = '****'.join(parsed.netloc.rsplit(parsed.password))
            masked_url = parsed._replace(netloc=masked_netloc)
            masked_url = urlparse.urlunparse(masked_url)
        else:
            masked_url = url
        LOG.info(
            _LI('Starting to drop alarm and alarm history tables in '
                '%(purpose)s backend: %(url)s'), {
                    'purpose': purpose,
                    'url': masked_url
                })

        connection_scheme = parsed.scheme
        conn = storage.get_connection_from_config(conf, purpose)
        if connection_scheme in ('mysql', 'mysql+pymysql', 'postgresql',
                                 'sqlite'):
            engine = conn._engine_facade.get_engine()
            meta = sa.MetaData(bind=engine)
            for table_name in ['alarm', 'alarm_history']:
                if engine.has_table(table_name):
                    alarm = sa.Table(table_name, meta, autoload=True)
                    alarm.drop()
                    LOG.info(
                        _LI("Legacy %s table of SQL backend has been "
                            "dropped."), table_name)
                else:
                    LOG.info(_LI('%s table does not exist.'), table_name)

        elif connection_scheme == 'hbase':
            with conn.conn_pool.connection() as h_conn:
                tables = h_conn.tables()
                table_name_mapping = {
                    'alarm': 'alarm',
                    'alarm_h': 'alarm history'
                }
                for table_name in ['alarm', 'alarm_h']:
                    try:
                        if table_name in tables:
                            h_conn.disable_table(table_name)
                            h_conn.delete_table(table_name)
                            LOG.info(
                                _LI("Legacy %s table of Hbase backend "
                                    "has been dropped."),
                                table_name_mapping[table_name])
                        else:
                            LOG.info(_LI('%s table does not exist.'),
                                     table_name_mapping[table_name])
                    except Exception as e:
                        LOG.error(
                            _LE('Error occurred while dropping alarm '
                                'tables of Hbase, %s'), e)

        elif connection_scheme == 'mongodb':
            for table_name in ['alarm', 'alarm_history']:
                if table_name in conn.db.conn.collection_names():
                    conn.db.conn.drop_collection(table_name)
                    LOG.info(
                        _LI("Legacy %s table of Mongodb backend has been "
                            "dropped."), table_name)
                else:
                    LOG.info(_LI('%s table does not exist.'), table_name)
    LOG.info('Legacy alarm tables cleanup done.')
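A hedged illustration of the password-masking logic above (credentials illustrative):

from urllib import parse as urlparse

parsed = urlparse.urlparse(
    'mysql+pymysql://ceilometer:secret@10.0.0.5/ceilometer')
masked_netloc = '****'.join(parsed.netloc.rsplit(parsed.password))
assert masked_netloc == 'ceilometer:****@10.0.0.5'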
Example #57
    def poll_and_notify(self):
        """Polling sample and notify."""
        cache = {}
        discovery_cache = {}
        poll_history = {}
        for source_name in self.pollster_matches:
            for pollster in self.pollster_matches[source_name]:
                key = Resources.key(source_name, pollster)
                candidate_res = list(self.resources[key].get(discovery_cache))
                if not candidate_res and pollster.obj.default_discovery:
                    candidate_res = self.manager.discover(
                        [pollster.obj.default_discovery], discovery_cache)

                # Remove duplicated resources and blacklisted resources.
                # Using set() requires a well defined __hash__ for each
                # resource. Since __eq__ is defined, 'not in' is safe here.
                polling_resources = []
                black_res = self.resources[key].blacklist
                history = poll_history.get(pollster.name, [])
                for x in candidate_res:
                    if x not in history:
                        history.append(x)
                        if x not in black_res:
                            polling_resources.append(x)
                poll_history[pollster.name] = history

                # If no resources, skip for this pollster
                if not polling_resources:
                    p_context = 'new ' if history else ''
                    LOG.info(
                        _LI("Skip pollster %(name)s, no %(p_context)s"
                            "resources found this cycle"), {
                                'name': pollster.name,
                                'p_context': p_context
                            })
                    continue

                LOG.info(
                    _LI("Polling pollster %(poll)s in the context of "
                        "%(src)s"), dict(poll=pollster.name, src=source_name))
                try:
                    samples = pollster.obj.get_samples(
                        manager=self.manager,
                        cache=cache,
                        resources=polling_resources)
                    sample_batch = []

                    for sample in samples:
                        sample_dict = (
                            publisher_utils.meter_message_from_counter(
                                sample, self._telemetry_secret))
                        if self._batch:
                            sample_batch.append(sample_dict)
                        else:
                            self._send_notification([sample_dict])

                    if sample_batch:
                        self._send_notification(sample_batch)

                except plugin_base.PollsterPermanentError as err:
                    LOG.error(
                        _('Prevent pollster %(name)s for '
                          'polling source %(source)s anymore!') %
                        ({
                            'name': pollster.name,
                            'source': source_name
                        }))
                    self.resources[key].blacklist.extend(err.fail_res_list)
                except Exception as err:
                    LOG.warning(
                        _('Continue after error from %(name)s: %(error)s') %
                        ({
                            'name': pollster.name,
                            'error': err
                        }),
                        exc_info=True)
Example #58
 def leave_group(self, group_id):
     if self._coordinator:
         self._coordinator.leave_group(group_id)
         LOG.info(_LI('Left partitioning group %s'), group_id)
Example #59
    def __init__(self,
                 conf,
                 cfg_file,
                 transformer_manager,
                 p_type=SAMPLE_TYPE):
        """Set up the pipelines according to config.

        The configuration is supported as follows:

        Decoupled: the source and sink configuration are separately
        specified before being linked together. This allows source-
        specific configuration, such as meter handling, to be
        kept focused only on the fine-grained source while avoiding
        the necessity for wide duplication of sink-related config.

        The configuration is provided in the form of separate lists
        of dictionaries defining sources and sinks, for example:

        {"sources": [{"name": source_1,
                      "meters" : ["meter_1", "meter_2"],
                      "sinks" : ["sink_1", "sink_2"]
                     },
                     {"name": source_2,
                      "meters" : ["meter_3"],
                      "sinks" : ["sink_2"]
                     },
                    ],
         "sinks": [{"name": sink_1,
                    "transformers": [
                           {"name": "Transformer_1",
                         "parameters": {"p1": "value"}},

                           {"name": "Transformer_2",
                            "parameters": {"p1": "value"}},
                          ],
                     "publishers": ["publisher_1", "publisher_2"]
                    },
                    {"name": sink_2,
                     "publishers": ["publisher_3"]
                    },
                   ]
        }

        Valid meter format is '*', '!meter_name', or 'meter_name'.
        '*' is a wildcard symbol meaning any meter; '!meter_name' means
        'meter_name' will be excluded; 'meter_name' means 'meter_name'
        will be included.

        A valid meters definition is all "included meter names", all
        "excluded meter names", a wildcard together with "excluded
        meter names", or only a wildcard.

        A transformer's name is its plugin name in setup.cfg.

        A publisher's name is its plugin name in setup.cfg.

        """
        super(PipelineManager, self).__init__(conf)
        cfg = self.load_config(cfg_file)
        self.pipelines = []
        if not ('sources' in cfg and 'sinks' in cfg):
            raise PipelineException("Both sources & sinks are required", cfg)
        LOG.info(_LI('detected decoupled pipeline config format'))
        publisher_manager = PublisherManager(self.conf, p_type['name'])

        unique_names = set()
        sources = []
        for s in cfg.get('sources'):
            name = s.get('name')
            if name in unique_names:
                raise PipelineException("Duplicated source names: %s" % name,
                                        self)
            else:
                unique_names.add(name)
                sources.append(p_type['source'](s))
        unique_names.clear()

        sinks = {}
        for s in cfg.get('sinks'):
            name = s.get('name')
            if name in unique_names:
                raise PipelineException("Duplicated sink names: %s" % name,
                                        self)
            else:
                unique_names.add(name)
                sinks[s['name']] = p_type['sink'](self.conf, s,
                                                  transformer_manager,
                                                  publisher_manager)
        unique_names.clear()

        for source in sources:
            source.check_sinks(sinks)
            for target in source.sinks:
                pipe = p_type['pipeline'](self.conf, source, sinks[target])
                if pipe.name in unique_names:
                    raise PipelineException(
                        "Duplicate pipeline name: %s. Ensure pipeline"
                        " names are unique. (name is the source and sink"
                        " names combined)" % pipe.name, cfg)
                else:
                    unique_names.add(pipe.name)
                    self.pipelines.append(pipe)
        unique_names.clear()
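
The meter-matching rules described in the docstring can be illustrated with a short sketch; match_meter below is a hypothetical helper, not the class's actual implementation.

def match_meter(meter_name, meters):
    """Apply the '*' / '!name' / 'name' matching rules to one meter."""
    excluded = [m[1:] for m in meters if m.startswith('!')]
    included = [m for m in meters if not m.startswith('!')]
    if meter_name in excluded:
        return False
    # A wildcard, or a pure-exclusion list, admits everything
    # that was not explicitly excluded.
    if '*' in included or not included:
        return True
    return meter_name in included


assert match_meter('cpu', ['*', '!disk.read.bytes'])
assert not match_meter('disk.read.bytes', ['*', '!disk.read.bytes'])
assert match_meter('cpu', ['cpu', 'memory'])
assert match_meter('cpu', ['!disk.read.bytes'])
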
Example #60
    def __init__(self, conf, cfg_info, transformer_manager,
                 p_type=SAMPLE_TYPE):
        """Setup the pipelines according to config.

        The configuration is supported as follows:

        Decoupled: the source and sink configuration are separately
        specified before being linked together. This allows source-
        specific configuration, such as resource discovery, to be
        kept focused only on the fine-grained source while avoiding
        the necessity for wide duplication of sink-related config.

        The configuration is provided in the form of separate lists
        of dictionaries defining sources and sinks, for example:

        {"sources": [{"name": source_1,
                      "interval": interval_time,
                      "meters" : ["meter_1", "meter_2"],
                      "resources": ["resource_uri1", "resource_uri2"],
                      "sinks" : ["sink_1", "sink_2"]
                     },
                     {"name": source_2,
                      "interval": interval_time,
                      "meters" : ["meter_3"],
                      "sinks" : ["sink_2"]
                     },
                    ],
         "sinks": [{"name": sink_1,
                    "transformers": [
                           {"name": "Transformer_1",
                         "parameters": {"p1": "value"}},

                           {"name": "Transformer_2",
                            "parameters": {"p1": "value"}},
                          ],
                     "publishers": ["publisher_1", "publisher_2"]
                    },
                    {"name": sink_2,
                     "publishers": ["publisher_3"]
                    },
                   ]
        }

        The interval determines the cadence of sample injection into
        the pipeline where samples are produced under the direct control
        of an agent, i.e. via a polling cycle as opposed to incoming
        notifications.

        Valid meter format is '*', '!meter_name', or 'meter_name'.
        '*' is a wildcard symbol meaning any meter; '!meter_name' means
        'meter_name' will be excluded; 'meter_name' means 'meter_name'
        will be included.

        The 'meter_name' above is the Sample name field.

        A valid meters definition is all "included meter names", all
        "excluded meter names", a wildcard together with "excluded
        meter names", or only a wildcard.

        The resources value is a list of URIs indicating the resources
        from which the meters should be polled. It is optional, and it
        is up to the specific pollster to decide how to use it.

        A transformer's name is its plugin name in setup.cfg.

        A publisher's name is its plugin name in setup.cfg.

        """
        super(PipelineManager, self).__init__(conf)
        cfg = self.load_config(cfg_info)
        self.pipelines = []
        if not ('sources' in cfg and 'sinks' in cfg):
            raise PipelineException("Both sources & sinks are required",
                                    cfg)
        LOG.info(_LI('detected decoupled pipeline config format'))

        unique_names = set()
        sources = []
        for s in cfg.get('sources'):
            name = s.get('name')
            if name in unique_names:
                raise PipelineException("Duplicated source names: %s" %
                                        name, self)
            else:
                unique_names.add(name)
                sources.append(p_type['source'](s))
        unique_names.clear()

        sinks = {}
        for s in cfg.get('sinks'):
            name = s.get('name')
            if name in unique_names:
                raise PipelineException("Duplicated sink names: %s" %
                                        name, self)
            else:
                unique_names.add(name)
                sinks[s['name']] = p_type['sink'](self.conf, s,
                                                  transformer_manager)
        unique_names.clear()

        for source in sources:
            source.check_sinks(sinks)
            for target in source.sinks:
                pipe = p_type['pipeline'](self.conf, source, sinks[target])
                if pipe.name in unique_names:
                    raise PipelineException(
                        "Duplicate pipeline name: %s. Ensure pipeline"
                        " names are unique. (name is the source and sink"
                        " names combined)" % pipe.name, cfg)
                else:
                    unique_names.add(pipe.name)
                    self.pipelines.append(pipe)
        unique_names.clear()
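
As a concrete instance of the format this docstring describes, here is a hypothetical polling pipeline definition; all names, the interval, and the resource URIs are illustrative only.

polling_cfg = {
    "sources": [{"name": "cpu_source",
                 "interval": 600,  # poll every 10 minutes
                 "meters": ["cpu", "cpu_util"],
                 "resources": ["snmp://10.0.0.1", "snmp://10.0.0.2"],
                 "sinks": ["cpu_sink"]}],
    # 'transformers' is optional in a sink, as sink_2 in the
    # docstring example shows.
    "sinks": [{"name": "cpu_sink",
               "publishers": ["notifier://"]}],
}
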