Example #1
0
    def get_samples(self, manager, cache, resources):
        # Only one resource for Node Manager pollster
        try:
            stats = self.read_data(cache)
        except nmexcept.IPMIException:
            self.polling_failures += 1
            LOG.warning(_('Polling %(name)s failed %(cnt)s times!')
                        % ({'name': self.NAME,
                            'cnt': self.polling_failures}))
            if 0 <= CONF.ipmi.polling_retry < self.polling_failures:
                LOG.warning(_('Pollster for %s is disabled!') % self.NAME)
                raise plugin_base.PollsterPermanentError(resources)
            else:
                return

        self.polling_failures = 0

        metadata = {
            'node': CONF.host
        }

        if stats:
            data = self.get_value(stats)

            yield sample.Sample(
                name=self.NAME,
                type=self.TYPE,
                unit=self.UNIT,
                volume=data,
                user_id=None,
                project_id=None,
                resource_id=CONF.host,
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata=metadata)
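
Note: a minimal, self-contained sketch of the retry-then-disable pattern above. The names FailureTracker and PollsterDisabled, and the polling_retry default, are hypothetical stand-ins for CONF.ipmi.polling_retry and plugin_base.PollsterPermanentError, not ceilometer APIs.

    class PollsterDisabled(Exception):
        """Raised once a pollster exceeds its retry budget (hypothetical)."""


    class FailureTracker(object):
        def __init__(self, polling_retry=3):
            # Stand-in for CONF.ipmi.polling_retry; negative means retry forever.
            self.polling_retry = polling_retry
            self.polling_failures = 0

        def record_failure(self):
            self.polling_failures += 1
            # Same guard as above: disable only when a non-negative retry
            # budget has been exceeded.
            if 0 <= self.polling_retry < self.polling_failures:
                raise PollsterDisabled()

        def record_success(self):
            self.polling_failures = 0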
Example #2
0
    def _calculate(self, resource_id):
        """Evaluate the expression and return a new sample if successful."""
        ns_dict = dict((m, s.as_dict()) for m, s
                       in six.iteritems(self.cache[resource_id]))
        ns = transformer.Namespace(ns_dict)
        try:
            new_volume = eval(self.expr_escaped, {}, ns)
            if math.isnan(new_volume):
                raise ArithmeticError(_('Expression evaluated to '
                                        'a NaN value!'))

            reference_sample = self.cache[resource_id][self.reference_meter]
            return sample.Sample(
                name=self.target.get('name', reference_sample.name),
                unit=self.target.get('unit', reference_sample.unit),
                type=self.target.get('type', reference_sample.type),
                volume=float(new_volume),
                user_id=reference_sample.user_id,
                project_id=reference_sample.project_id,
                resource_id=reference_sample.resource_id,
                timestamp=self.latest_timestamp,
                resource_metadata=reference_sample.resource_metadata
            )
        except Exception as e:
            LOG.warn(_('Unable to evaluate expression %(expr)s: %(exc)s'),
                     {'expr': self.expr, 'exc': e})
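
Note: the transformer above evaluates the configured expression with eval(), passing empty globals and a namespace of cached samples as locals. A hedged sketch of the same idea with plain dicts; the meter names and expression are illustrative.

    import math

    samples = {'cpu': {'volume': 500.0}, 'cpu_util': {'volume': 50.0}}
    expr = "cpu['volume'] / cpu_util['volume']"
    new_volume = eval(expr, {}, samples)  # empty globals, samples as locals
    if math.isnan(new_volume):
        raise ArithmeticError('Expression evaluated to a NaN value!')
    print(float(new_volume))  # 10.0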
Example #3
0
    def inspect_memory_usage(self, instance, duration=None):
        instance_name = util.instance_name(instance)
        domain = self._lookup_by_uuid(instance)
        state = domain.info()[0]
        if state == libvirt.VIR_DOMAIN_SHUTOFF:
            LOG.warn(_('Failed to inspect memory usage of instance Name '
                       '%(instance_name)s UUID %(instance_uuid)s, '
                       'domain is in state of SHUTOFF'),
                     {'instance_name': instance_name,
                      'instance_uuid': instance.id})
            return

        try:
            memory_stats = domain.memoryStats()
            if (memory_stats and
                    memory_stats.get('available') and
                    memory_stats.get('unused')):
                memory_used = (memory_stats.get('available') -
                               memory_stats.get('unused'))
                # Stat provided from libvirt is in KB, converting it to MB.
                memory_used = memory_used / units.Ki
                return virt_inspector.MemoryUsageStats(usage=memory_used)
            else:
                LOG.warn(_('Failed to inspect memory usage of instance Name '
                           '%(instance_name)s UUID %(instance_uuid)s, '
                           'can not get info from libvirt'),
                         {'instance_name': instance_name,
                          'instance_uuid': instance.id})
        # memoryStats might launch an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
        except libvirt.libvirtError as e:
            LOG.warn(_('Failed to inspect memory usage of %(instance_uuid)s, '
                       'can not get info from libvirt: %(error)s'),
                     {'instance_uuid': instance.id, 'error': e})
Example #4
0
 def _reason(cls, alarm, statistics, distilled, state):
     """Fabricate reason string."""
     count = len(statistics)
     disposition = "inside" if state == evaluator.OK else "outside"
     last = getattr(statistics[-1], alarm.rule["statistic"])
     transition = alarm.state != state
     reason_data = cls._reason_data(disposition, count, last)
     if transition:
         return (
             (
                 _(
                     "Transition to %(state)s due to %(count)d samples"
                     " %(disposition)s threshold, most recent:"
                     " %(most_recent)s"
                 )
                 % dict(reason_data, state=state)
             ),
             reason_data,
         )
     return (
         (
             _(
                 "Remaining as %(state)s due to %(count)d samples"
                 " %(disposition)s threshold, most recent: %(most_recent)s"
             )
             % dict(reason_data, state=state)
         ),
         reason_data,
     )
Example #5
0
 def inspect_cpu_l3_cache(self, instance):
     domain = self._lookup_by_uuid(instance)
     try:
         stats = self.connection.domainListGetStats(
             [domain], libvirt.VIR_DOMAIN_STATS_PERF)
         perf = stats[0][1]
         usage = perf["perf.cmt"]
         return virt_inspector.CPUL3CacheUsageStats(l3_cache_usage=usage)
     except (KeyError, AttributeError) as e:
         # NOTE(sileht): KeyError is for libvirt >=2.0.0,<2.3.0, where the
         # perf subsystem existed but not these attributes
         # https://github.com/libvirt/libvirt/commit/bae660869de0612bee2a740083fb494c27e3f80c
         msg = _('Perf is not supported by current version of libvirt, and '
                 'failed to inspect l3 cache usage of %(instance_uuid)s, '
                 'can not get info from libvirt: %(error)s') % {
             'instance_uuid': instance.id, 'error': e}
         raise virt_inspector.NoDataException(msg)
     # domainListGetStats might launch an exception if the method or
     # cmt perf event is not supported by the underlying hypervisor
     # being used by libvirt.
     except libvirt.libvirtError as e:
         msg = _('Failed to inspect l3 cache usage of %(instance_uuid)s, '
                 'can not get info from libvirt: %(error)s') % {
             'instance_uuid': instance.id, 'error': e}
         raise virt_inspector.NoDataException(msg)
Example #6
0
    def __init__(self, parsed_url):
        super(FilePublisher, self).__init__(parsed_url)

        self.publisher_logger = None
        path = parsed_url.path
        if not path or path.lower() == 'file':
            LOG.error(_('The path for the file publisher is required'))
            return

        rfh = None
        max_bytes = 0
        backup_count = 0
        # Handling other configuration options in the query string
        if parsed_url.query:
            params = urlparse.parse_qs(parsed_url.query)
            if params.get('max_bytes') and params.get('backup_count'):
                try:
                    max_bytes = int(params.get('max_bytes')[0])
                    backup_count = int(params.get('backup_count')[0])
                except ValueError:
                    LOG.error(_('max_bytes and backup_count should be '
                              'numbers.'))
                    return
        # create rotating file handler
        rfh = logging.handlers.RotatingFileHandler(
            path, encoding='utf8', maxBytes=max_bytes,
            backupCount=backup_count)

        self.publisher_logger = logging.Logger('publisher.file')
        self.publisher_logger.propagate = False
        self.publisher_logger.setLevel(logging.INFO)
        rfh.setLevel(logging.INFO)
        self.publisher_logger.addHandler(rfh)
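
Note: a standard-library-only sketch of the same publisher setup, building a rotating file handler from a URL; the file path and query parameters are illustrative.

    import logging
    import logging.handlers
    from urllib import parse as urlparse

    url = urlparse.urlparse(
        'file:///tmp/meters.log?max_bytes=1048576&backup_count=5')
    params = urlparse.parse_qs(url.query)
    max_bytes = int(params.get('max_bytes', ['0'])[0])
    backup_count = int(params.get('backup_count', ['0'])[0])

    rfh = logging.handlers.RotatingFileHandler(
        url.path, encoding='utf8', maxBytes=max_bytes,
        backupCount=backup_count)
    publisher_logger = logging.Logger('publisher.file')
    publisher_logger.propagate = False
    publisher_logger.setLevel(logging.INFO)
    rfh.setLevel(logging.INFO)
    publisher_logger.addHandler(rfh)
    publisher_logger.info('sample published')  # one line per published item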
Example #7
0
    def handle_sample(self, context, s):
        """Handle a sample, converting if necessary."""
        LOG.debug(_('handling sample %s'), (s,))
        key = s.name + s.resource_id
        prev = self.cache.get(key)
        timestamp = timeutils.parse_isotime(s.timestamp)
        self.cache[key] = (s.volume, timestamp)

        if prev:
            prev_volume = prev[0]
            prev_timestamp = prev[1]
            time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
            # we only allow negative deltas for noncumulative samples, whereas
            # for cumulative we assume that a reset has occurred in the interim
            # so that the current volume gives a lower bound on growth
            volume_delta = (s.volume - prev_volume
                            if (prev_volume <= s.volume or
                                s.type != sample.TYPE_CUMULATIVE)
                            else s.volume)
            rate_of_change = ((1.0 * volume_delta / time_delta)
                              if time_delta else 0.0)

            s = self._convert(s, rate_of_change)
            LOG.debug(_('converted to: %s'), (s,))
        else:
            LOG.warn(_('dropping sample with no predecessor: %s'),
                     (s,))
            s = None
        return s
Example #8
0
    def handle_sample(self, s):
        """Handle a sample, converting if necessary."""
        LOG.debug("handling sample %s", s)
        key = s.name + s.resource_id
        prev = self.cache.get(key)
        timestamp = timeutils.parse_isotime(s.timestamp)
        self.cache[key] = (s.volume, timestamp)

        if prev:
            prev_volume = prev[0]
            prev_timestamp = prev[1]
            time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
            # disallow violations of the arrow of time
            if time_delta < 0:
                LOG.warning(_("dropping out of time order sample: %s"), (s,))
                # Reset the cache to the newer sample.
                self.cache[key] = prev
                return None
            # we only allow negative volume deltas for noncumulative
            # samples, whereas for cumulative we assume that a reset has
            # occurred in the interim so that the current volume gives a
            # lower bound on growth
            volume_delta = (
                s.volume - prev_volume if (prev_volume <= s.volume or s.type != sample.TYPE_CUMULATIVE) else s.volume
            )
            rate_of_change = (1.0 * volume_delta / time_delta) if time_delta else 0.0

            s = self._convert(s, rate_of_change)
            LOG.debug("converted to: %s", s)
        else:
            LOG.warning(_("dropping sample with no predecessor: %s"), (s,))
            s = None
        return s
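
Note: the delta arithmetic shared by the two handle_sample() variants above, reduced to a pure function (a sketch; sample fields are passed explicitly and timestamps are datetime objects).

    def rate_of_change(prev_volume, prev_ts, volume, ts, cumulative=True):
        time_delta = (ts - prev_ts).total_seconds()
        if time_delta < 0:
            return None  # out-of-time-order sample: drop it, as Example #8 does
        # For cumulative meters a volume drop is treated as a counter reset,
        # so the current volume is taken as a lower bound on growth.
        if prev_volume <= volume or not cumulative:
            volume_delta = volume - prev_volume
        else:
            volume_delta = volume
        return (1.0 * volume_delta / time_delta) if time_delta else 0.0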
Example #9
0
def _event_query_to_event_filter(q):
    evt_model_filter = {"event_type": None, "message_id": None, "start_timestamp": None, "end_timestamp": None}
    filters = _build_rbac_query_filters()
    traits_filter = filters["t_filter"]
    admin_proj = filters["admin_proj"]

    for i in q:
        if not i.op:
            i.op = "eq"
        elif i.op not in base.operation_kind:
            error = _("operator %(operator)s is not supported. the supported" " operators are: %(supported)s") % {
                "operator": i.op,
                "supported": base.operation_kind,
            }
            raise base.ClientSideError(error)
        if i.field in evt_model_filter:
            if i.op != "eq":
                error = _(
                    "operator %(operator)s is not supported. Only"
                    " equality operator is available for field"
                    " %(field)s"
                ) % {"operator": i.op, "field": i.field}
                raise base.ClientSideError(error)
            evt_model_filter[i.field] = i.value
        else:
            trait_type = i.type or "string"
            traits_filter.append({"key": i.field, trait_type: i._get_value_as_type(), "op": i.op})
    return storage.EventFilter(traits_filter=traits_filter, admin_proj=admin_proj, **evt_model_filter)
Example #10
0
 def handle_sample(self, context, s):
     """Handle a sample, converting if necessary."""
     LOG.debug(_('handling sample %s'), (s,))
     if self.source.get('unit', s.unit) == s.unit:
         s = self._convert(s)
         LOG.debug(_('converted to: %s'), (s,))
     return s
Example #11
0
    def inspect_memory_bandwidth(self, instance, duration=None):
        domain = self._get_domain_not_shut_off_or_raise(instance)

        try:
            stats = self.connection.domainListGetStats(
                [domain], libvirt.VIR_DOMAIN_STATS_PERF)
            perf = stats[0][1]
            return virt_inspector.MemoryBandwidthStats(total=perf["perf.mbmt"],
                                                       local=perf["perf.mbml"])
        except (KeyError, AttributeError) as e:
            # NOTE(sileht): KeyError is for libvirt >=2.0.0,<2.3.0, where the
            # perf subsystem existed but not these attributes
            # https://github.com/libvirt/libvirt/commit/bae660869de0612bee2a740083fb494c27e3f80c
            msg = _('Perf is not supported by current version of libvirt, and '
                    'failed to inspect memory bandwidth of %(instance_uuid)s, '
                    'can not get info from libvirt: %(error)s') % {
                'instance_uuid': instance.id, 'error': e}
            raise virt_inspector.NoDataException(msg)
        # domainListGetStats might launch an exception if the method or
        # mbmt/mbml perf event is not supported by the underlying hypervisor
        # being used by libvirt.
        except libvirt.libvirtError as e:
            msg = _('Failed to inspect memory bandwidth of %(instance_uuid)s, '
                    'can not get info from libvirt: %(error)s') % {
                'instance_uuid': instance.id, 'error': e}
            raise virt_inspector.NoDataException(msg)
Example #12
0
 def get_samples(self, manager, cache, resources):
     for instance in resources:
         LOG.debug(_("checking instance %s"), instance.id)
         try:
             cpu_info = self.inspector.inspect_cpus(instance)
             LOG.debug(
                 _("CPUTIME USAGE: %(instance)s %(time)d"), {"instance": instance.__dict__, "time": cpu_info.time}
             )
             cpu_num = {"cpu_number": cpu_info.number}
             yield util.make_sample_from_instance(
                 instance,
                 name="cpu",
                 type=sample.TYPE_CUMULATIVE,
                 unit="ns",
                 volume=cpu_info.time,
                 additional_metadata=cpu_num,
             )
         except virt_inspector.InstanceNotFoundException as err:
             # Instance was deleted while getting samples. Ignore it.
             LOG.debug(_("Exception while getting samples %s"), err)
         except ceilometer.NotImplementedError:
             # Selected inspector does not implement this pollster.
             LOG.debug(_("Obtaining CPU time is not implemented for %s"), self.inspector.__class__.__name__)
         except Exception as err:
             LOG.exception(_("could not get CPU time for %(id)s: %(e)s"), {"id": instance.id, "e": err})
Example #13
0
    def get_one(self, message_id):
        """Return a single event with the given message id.

        :param message_id: Message ID of the Event to be returned
        """
        rbac.enforce("events:show", pecan.request)
        filters = _build_rbac_query_filters()
        t_filter = filters['t_filter']
        admin_proj = filters['admin_proj']
        event_filter = storage.EventFilter(traits_filter=t_filter,
                                           admin_proj=admin_proj,
                                           message_id=message_id)
        events = [event for event
                  in pecan.request.event_storage_conn.get_events(event_filter)]
        if not events:
            raise base.EntityNotFound(_("Event"), message_id)

        if len(events) > 1:
            LOG.error(_("More than one event with "
                        "id %s returned from storage driver") % message_id)

        event = events[0]

        return Event(message_id=event.message_id,
                     event_type=event.event_type,
                     generated=event.generated,
                     traits=event.traits,
                     raw=event.raw)
Example #14
0
 def get_samples(self, manager, cache, resources):
     self._inspection_duration = self._record_poll_time()
     for instance in resources:
         LOG.debug(_('Checking CPU util for instance %s'), instance.id)
         try:
             cpu_info = self.inspector.inspect_cpu_util(
                 instance, self._inspection_duration)
             LOG.debug(_("CPU UTIL: %(instance)s %(util)d"),
                       ({'instance': instance.__dict__,
                         'util': cpu_info.util}))
             yield util.make_sample_from_instance(
                 instance,
                 name='cpu_util',
                 type=sample.TYPE_GAUGE,
                 unit='%',
                 volume=cpu_info.util,
             )
         except virt_inspector.InstanceNotFoundException as err:
             # Instance was deleted while getting samples. Ignore it.
             LOG.debug(_('Exception while getting samples %s'), err)
         except ceilometer.NotImplementedError:
             # Selected inspector does not implement this pollster.
             LOG.debug(_('Obtaining CPU Util is not implemented for %s'),
                       self.inspector.__class__.__name__)
         except Exception as err:
             LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'),
                           {'id': instance.id, 'e': err})
Example #15
0
    def _publish_samples(self, start, ctxt, samples):
        """Push samples into pipeline for publishing.

        :param start: The first transformer that the sample will be injected
                      into. This is mainly for flush() invocation, since a
                      transformer may emit samples.
        :param ctxt: Execution context from the manager or service.
        :param samples: Sample list.

        """

        transformed_samples = []
        if not self.transformers:
            transformed_samples = samples
        else:
            for sample in samples:
                LOG.debug(_(
                    "Pipeline %(pipeline)s: Transform sample "
                    "%(smp)s from %(trans)s transformer") % ({'pipeline': self,
                                                              'smp': sample,
                                                              'trans': start}))
                sample = self._transform_sample(start, ctxt, sample)
                if sample:
                    transformed_samples.append(sample)

        if transformed_samples:
            for p in self.publishers:
                try:
                    p.publish_samples(ctxt, transformed_samples)
                except Exception:
                    LOG.exception(_(
                        "Pipeline %(pipeline)s: Continue after error "
                        "from publisher %(pub)s") % ({'pipeline': self,
                                                      'pub': p}))
Example #16
0
def setup_meters_config():
    """Setup the meters definitions from yaml config file."""
    config_file = get_config_file()
    if config_file is not None:
        LOG.debug(_("Meter Definitions configuration file: %s"), config_file)

        with open(config_file) as cf:
            config = cf.read()

        try:
            events_config = yaml.safe_load(config)
        except yaml.YAMLError as err:
            if hasattr(err, 'problem_mark'):
                mark = err.problem_mark
                errmsg = (_("Invalid YAML syntax in Meter Definitions file "
                            "%(file)s at line: %(line)s, column: %(column)s.")
                          % dict(file=config_file,
                                 line=mark.line + 1,
                                 column=mark.column + 1))
            else:
                errmsg = (_("YAML error reading Meter Definitions file "
                            "%(file)s")
                          % dict(file=config_file))
            LOG.error(errmsg)
            raise

    else:
        LOG.debug(_("No Meter Definitions configuration file found!"
                  " Using default config."))
        events_config = []

    LOG.info(_("Meter Definitions: %s"), events_config)

    return events_config
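
Note: PyYAML exposes the problem_mark attribute with zero-based line/column positions, as used above; a minimal sketch of the same load-and-report pattern (the file name is whatever get_config_file() returns).

    import yaml

    def load_definitions(config_file):
        with open(config_file) as cf:
            config = cf.read()
        try:
            return yaml.safe_load(config)
        except yaml.YAMLError as err:
            if hasattr(err, 'problem_mark'):
                mark = err.problem_mark
                # problem_mark is zero-based, hence the +1 in the message.
                print('Invalid YAML at line %d, column %d in %s'
                      % (mark.line + 1, mark.column + 1, config_file))
            raise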
Example #17
0
    def _handle_action(self, action, alarm_id, alarm_name, severity,
                       previous, current, reason, reason_data):
        try:
            action = netutils.urlsplit(action)
        except Exception:
            LOG.error(
                _("Unable to parse action %(action)s for alarm %(alarm_id)s"),
                {'action': action, 'alarm_id': alarm_id})
            return

        try:
            notifier = ceilometer_alarm.NOTIFIERS[action.scheme].obj
        except KeyError:
            scheme = action.scheme
            LOG.error(
                _("Action %(scheme)s for alarm %(alarm_id)s is unknown, "
                  "cannot notify"),
                {'scheme': scheme, 'alarm_id': alarm_id})
            return

        try:
            LOG.debug(_("Notifying alarm %(id)s with action %(act)s") % (
                      {'id': alarm_id, 'act': action}))
            notifier.notify(action, alarm_id, alarm_name, severity,
                            previous, current, reason, reason_data)
        except Exception:
            LOG.exception(_("Unable to notify alarm %s"), alarm_id)
            return
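
Note: a sketch of the scheme-to-notifier dispatch above, with a plain dict standing in for the ceilometer_alarm.NOTIFIERS extension registry and urllib replacing netutils; all names here are hypothetical.

    from urllib import parse as urlparse

    NOTIFIERS = {
        'log': lambda action, alarm_id: print('log notify', alarm_id),
        'http': lambda action, alarm_id: print('POST to', action.geturl()),
    }

    def handle_action(action_url, alarm_id):
        action = urlparse.urlsplit(action_url)
        try:
            notifier = NOTIFIERS[action.scheme]
        except KeyError:
            print('Action %s for alarm %s is unknown, cannot notify'
                  % (action.scheme, alarm_id))
            return
        notifier(action, alarm_id)

    handle_action('log://localhost', 'alarm-1')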
Example #18
0
    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        """

        session = self._engine_facade.get_session()
        with session.begin():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            sample_q = (session.query(models.Sample)
                        .filter(models.Sample.timestamp < end))
            rows = sample_q.delete()
            LOG.info(_("%d samples removed from database"), rows)

            if not cfg.CONF.sql_expire_samples_only:
                # remove Meter definitions with no matching samples
                (session.query(models.Meter)
                 .filter(~models.Meter.samples.any())
                 .delete(synchronize_session=False))

                # remove resources with no matching samples
                resource_q = (session.query(models.Resource.internal_id)
                              .filter(~models.Resource.samples.any()))
                resource_subq = resource_q.subquery()
                # remove metadata of cleaned resources
                for table in [models.MetaText, models.MetaBigInt,
                              models.MetaFloat, models.MetaBool]:
                    (session.query(table)
                     .filter(table.id.in_(resource_subq))
                     .delete(synchronize_session=False))
                resource_q.delete(synchronize_session=False)
                LOG.info(_("Expired residual resource and"
                           " meter definition data"))
Example #19
0
    def record_metering_data(self, data):
        # We may have received only one counter on the wire
        if not isinstance(data, list):
            data = [data]

        for meter in data:
            LOG.debug(_(
                'metering data %(counter_name)s '
                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
                % ({'counter_name': meter['counter_name'],
                    'resource_id': meter['resource_id'],
                    'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
                    'counter_volume': meter['counter_volume']}))
            if publisher_utils.verify_signature(
                    meter, self.conf.publisher.telemetry_secret):
                try:
                    # Convert the timestamp to a datetime instance.
                    # Storage engines are responsible for converting
                    # that value to something they can store.
                    if meter.get('timestamp'):
                        ts = timeutils.parse_isotime(meter['timestamp'])
                        meter['timestamp'] = timeutils.normalize_time(ts)
                    self.meter_conn.record_metering_data(meter)
                except Exception as err:
                    LOG.exception(_('Failed to record metering data: %s'),
                                  err)
                    # raise the exception to propagate it up in the chain.
                    raise
            else:
                LOG.warning(_(
                    'message signature invalid, discarding message: %r'),
                    meter)
Example #20
0
    def start_udp(self):
        address_family = socket.AF_INET
        if netutils.is_valid_ipv6(cfg.CONF.collector.udp_address):
            address_family = socket.AF_INET6
        udp = socket.socket(address_family, socket.SOCK_DGRAM)
        udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        udp.bind((cfg.CONF.collector.udp_address,
                  cfg.CONF.collector.udp_port))

        self.udp_run = True
        while self.udp_run:
            # NOTE(jd) Arbitrary limit of 64K because that ought to be
            # enough for anybody.
            data, source = udp.recvfrom(64 * units.Ki)
            try:
                sample = msgpack.loads(data, encoding='utf-8')
            except Exception:
                LOG.warn(_("UDP: Cannot decode data sent by %s"), source)
            else:
                try:
                    LOG.debug(_("UDP: Storing %s"), sample)
                    self.dispatcher_manager.map_method('record_metering_data',
                                                       sample)
                except Exception:
                    LOG.exception(_("UDP: Unable to store meter"))
Example #21
0
def get_api_session(conf):
    if not api:
        raise ImportError(_('XenAPI not installed'))

    url = conf.xenapi.connection_url
    username = conf.xenapi.connection_username
    password = conf.xenapi.connection_password
    if not url or password is None:
        raise XenapiException(_('Must specify connection_url, and '
                                'connection_password to use'))

    try:
        session = (api.xapi_local() if url == 'unix://local'
                   else api.Session(url))
        session.login_with_password(username, password)
    except api.Failure as e:
        if e.details[0] == 'HOST_IS_SLAVE':
            master = e.details[1]
            url = swap_xapi_host(url, master)
            try:
                session = api.Session(url)
                session.login_with_password(username, password)
            except api.Failure as es:
                raise XenapiException(_('Could not connect to slave host: %s') %
                                      es.details[0])
        else:
            msg = _("Could not connect to XenAPI: %s") % e.details[0]
            raise XenapiException(msg)
    return session
Example #22
0
 def _post_measure(self, resource_type, resource_id, metric_name,
                   measure_attributes):
     r = self.gnocchi_api.post("%s/v1/resource/%s/%s/metric/%s/measures"
                               % (self.gnocchi_url, resource_type,
                                  resource_id, metric_name),
                               headers=self._get_headers(),
                               data=json.dumps(measure_attributes))
     if r.status_code == 404:
         LOG.debug(_("The metric %(metric_name)s of "
                     "resource %(resource_id)s doesn't exists: "
                     "%(status_code)d"),
                   {'metric_name': metric_name,
                    'resource_id': resource_id,
                    'status_code': r.status_code})
         raise NoSuchMetric
     elif int(r.status_code / 100) != 2:
         raise UnexpectedWorkflowError(
             _("Fail to post measure on metric %(metric_name)s of "
               "resource %(resource_id)s with status: "
               "%(status_code)d: %(msg)s") %
             {'metric_name': metric_name,
              'resource_id': resource_id,
              'status_code': r.status_code,
              'msg': r.text})
     else:
         LOG.debug("Measure posted on metric %s of resource %s",
                   metric_name, resource_id)
Example #23
0
    def _bracketer_calculate(self, request_id):
        """Evaluate the brackelet expression and return a new event if successful."""
        try:
            begin_event = self.cache[request_id][self.begin_event_type]
            end_event = self.cache[request_id][self.end_event_type]

            result = timeutils.delta_seconds(begin_event.generated,
                                             end_event.generated)

            if result < 0:
                LOG.warn(_('bracket result %(result)s '
                           'from %(begin_event)s: %(end_event)s < 0'),
                         {'result': result,
                          'begin_event': begin_event,
                          'end_event': end_event})
                return

            event_type = self.target.get('event_type')
            message_id = uuid.uuid4()
            when = timeutils.utcnow()
            # End_event has resource_id trait
            traits = deepcopy(end_event.traits)
            latency_trait = models.Trait(self.target_trait_name,
                                         self.target_trait_type,
                                         result)
            traits.append(latency_trait)
            raw = {}

            event = models.Event(message_id, event_type, when, traits, raw)
            return event
        except Exception:
            LOG.warn(_('Unable to evaluate the bracket expression'))
Example #24
0
def _event_query_to_event_filter(q):
    evt_model_filter = {
        'event_type': None,
        'message_id': None,
        'start_timestamp': None,
        'end_timestamp': None
    }
    filters = _build_rbac_query_filters()
    traits_filter = filters['t_filter']
    admin_proj = filters['admin_proj']

    for i in q:
        if not i.op:
            i.op = 'eq'
        elif i.op not in base.operation_kind:
            error = (_('Operator %(operator)s is not supported. The supported'
                       ' operators are: %(supported)s') %
                     {'operator': i.op, 'supported': base.operation_kind})
            raise base.ClientSideError(error)
        if i.field in evt_model_filter:
            if i.op != 'eq':
                error = (_('Operator %(operator)s is not supported. Only'
                           ' equality operator is available for field'
                           ' %(field)s') %
                         {'operator': i.op, 'field': i.field})
                raise base.ClientSideError(error)
            evt_model_filter[i.field] = i.value
        else:
            trait_type = i.type or 'string'
            traits_filter.append({"key": i.field,
                                  trait_type: i._get_value_as_type(),
                                  "op": i.op})
    return storage.EventFilter(traits_filter=traits_filter,
                               admin_proj=admin_proj, **evt_model_filter)
Example #25
0
    def record_events(self, event_models):
        """Write the events to SQL database via sqlalchemy.

        :param event_models: a list of model.Event objects.

        Returns a list of events that could not be saved in a
        (reason, event) tuple. Reasons are enumerated in
        storage.model.Event

        Flush when they're all added, unless new EventTypes or
        TraitTypes are added along the way.
        """
        session = self._engine_facade.get_session()
        events = []
        problem_events = []
        for event_model in event_models:
            event = None
            try:
                with session.begin():
                    event = self._record_event(session, event_model)
            except dbexc.DBDuplicateEntry as e:
                LOG.exception(_("Failed to record duplicated event: %s") % e)
                problem_events.append((api_models.Event.DUPLICATE,
                                       event_model))
            except Exception as e:
                LOG.exception(_('Failed to record event: %s') % e)
                problem_events.append((api_models.Event.UNKNOWN_PROBLEM,
                                       event_model))
            events.append(event)
        return problem_events
Example #26
0
    def validate(self, visibility_field):
        """Validates the query content and does the necessary conversions."""
        if self.original_query.filter is wtypes.Unset:
            self.filter_expr = None
        else:
            try:
                self.filter_expr = json.loads(self.original_query.filter)
                self._validate_filter(self.filter_expr)
            except (ValueError, jsonschema.exceptions.ValidationError) as e:
                raise base.ClientSideError(
                    _("Filter expression not valid: %s") % e)
            self._replace_isotime_with_datetime(self.filter_expr)
            self._convert_operator_to_lower_case(self.filter_expr)
            self._normalize_field_names_for_db_model(self.filter_expr)

        self._force_visibility(visibility_field)

        if self.original_query.orderby is wtypes.Unset:
            self.orderby = None
        else:
            try:
                self.orderby = json.loads(self.original_query.orderby)
                self._validate_orderby(self.orderby)
            except (ValueError, jsonschema.exceptions.ValidationError) as e:
                raise base.ClientSideError(
                    _("Order-by expression not valid: %s") % e)
            self._convert_orderby_to_lower_case(self.orderby)
            self._normalize_field_names_in_orderby(self.orderby)

        self.limit = (None if self.original_query.limit is wtypes.Unset
                      else self.original_query.limit)

        self.limit = v2_utils.enforce_limit(self.limit)
Example #27
0
 def get_samples(self, manager, cache, resources):
     self._inspection_duration = self._record_poll_time()
     for instance in resources:
         instance_name = util.instance_name(instance)
         LOG.debug(_('checking net info for instance %s'), instance.id)
         try:
             vnics = self._get_vnics_for_instance(
                 cache,
                 self.inspector,
                 instance,
             )
             for vnic, info in vnics:
                 LOG.debug(self.NET_USAGE_MESSAGE, instance_name,
                           vnic.name, self._get_rx_info(info),
                           self._get_tx_info(info))
                 yield self._get_sample(instance, vnic, info)
         except virt_inspector.InstanceNotFoundException as err:
             # Instance was deleted while getting samples. Ignore it.
             LOG.debug(_('Exception while getting samples %s'), err)
         except ceilometer.NotImplementedError:
             # Selected inspector does not implement this pollster.
             LOG.debug(_('%(inspector)s does not provide data for '
                         ' %(pollster)s'),
                       {'inspector': self.inspector.__class__.__name__,
                        'pollster': self.__class__.__name__})
         except Exception as err:
             LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
                           {'name': instance_name, 'error': err})
Example #28
0
    def inspect_memory_usage(self, instance, duration=None):
        instance_name = util.instance_name(instance)
        domain = self._get_domain_not_shut_off_or_raise(instance)

        try:
            memory_stats = domain.memoryStats()
            if (memory_stats and
                    memory_stats.get('available') and
                    memory_stats.get('unused')):
                memory_used = (memory_stats.get('available') -
                               memory_stats.get('unused'))
                # Stat provided from libvirt is in KB, converting it to MB.
                memory_used = memory_used / units.Ki
                return virt_inspector.MemoryUsageStats(usage=memory_used)
            else:
                msg = _('Failed to inspect memory usage of instance '
                        '<name=%(name)s, id=%(id)s>, '
                        'can not get info from libvirt.') % {
                    'name': instance_name, 'id': instance.id}
                raise virt_inspector.InstanceNoDataException(msg)
        # memoryStats might launch an exception if the method is not supported
        # by the underlying hypervisor being used by libvirt.
        except libvirt.libvirtError as e:
            msg = _('Failed to inspect memory usage of %(instance_uuid)s, '
                    'can not get info from libvirt: %(error)s') % {
                'instance_uuid': instance.id, 'error': e}
            raise virt_inspector.NoDataException(msg)
Example #29
0
    def _update_duration(self, start_timestamp, end_timestamp):
        # "Clamp" the timestamps we return to the original time
        # range, excluding the offset.
        if (start_timestamp and
                self.duration_start and
                self.duration_start < start_timestamp):
            self.duration_start = start_timestamp
            LOG.debug(_('clamping min timestamp to range'))
        if (end_timestamp and
                self.duration_end and
                self.duration_end > end_timestamp):
            self.duration_end = end_timestamp
            LOG.debug(_('clamping max timestamp to range'))

        # If we got valid timestamps back, compute a duration in seconds.
        #
        # If the min > max after clamping then we know the
        # timestamps on the samples fell outside of the time
        # range we care about for the query, so treat them as
        # "invalid."
        #
        # If the timestamps are invalid, return None as a
        # sentinel indicating that there is something "funny"
        # about the range.
        if (self.duration_start and
                self.duration_end and
                self.duration_start <= self.duration_end):
            self.duration = timeutils.delta_seconds(self.duration_start,
                                                    self.duration_end)
        else:
            self.duration_start = self.duration_end = self.duration = None
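
Note: the clamping logic above as a pure function over datetime objects (a sketch without instance state); it returns (None, None, None) when the clamped window is empty.

    def clamp_duration(dur_start, dur_end, range_start, range_end):
        if range_start and dur_start and dur_start < range_start:
            dur_start = range_start  # clamp min timestamp to range
        if range_end and dur_end and dur_end > range_end:
            dur_end = range_end      # clamp max timestamp to range
        if dur_start and dur_end and dur_start <= dur_end:
            return (dur_start, dur_end,
                    (dur_end - dur_start).total_seconds())
        # min > max after clamping: the samples fell outside the query range.
        return None, None, None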
Example #30
0
 def get_samples(self, manager, cache, resources):
     for instance in resources:
         try:
             disk_size_info = self._populate_cache(
                 self.inspector,
                 cache,
                 instance,
             )
             for disk_info in self._get_samples(instance, disk_size_info):
                 yield disk_info
         except virt_inspector.InstanceNotFoundException as err:
             # Instance was deleted while getting samples. Ignore it.
             LOG.debug(_('Exception while getting samples %s'), err)
         except virt_inspector.InstanceShutOffException as e:
             LOG.warn(_LW('Instance %(instance_id)s was shut off while '
                          'getting samples of %(pollster)s: %(exc)s'),
                      {'instance_id': instance.id,
                       'pollster': self.__class__.__name__, 'exc': e})
         except ceilometer.NotImplementedError:
             # Selected inspector does not implement this pollster.
             LOG.debug(_('%(inspector)s does not provide data for '
                         ' %(pollster)s'), (
                       {'inspector': manager.inspector.__class__.__name__,
                        'pollster': self.__class__.__name__}))
         except Exception as err:
             instance_name = util.instance_name(instance)
             LOG.exception(_('Ignoring instance %(name)s '
                             '(%(instance_id)s) : %(error)s') % (
                           {'name': instance_name,
                            'instance_id': instance.id,
                            'error': err}))
Example #31
0
    def _configure_main_queue_listeners(self, pipe_manager,
                                        event_pipe_manager):
        notification_manager = self._get_notifications_manager(pipe_manager)
        if not list(notification_manager):
            LOG.warning(_('Failed to load any notification handlers for %s'),
                        self.NOTIFICATION_NAMESPACE)

        ack_on_error = cfg.CONF.notification.ack_on_event_error

        endpoints = []
        if cfg.CONF.notification.store_events:
            endpoints.append(
                event_endpoint.EventsNotificationEndpoint(event_pipe_manager))

        targets = []
        for ext in notification_manager:
            handler = ext.obj
            if (cfg.CONF.notification.disable_non_metric_meters
                    and isinstance(handler, base.NonMetricNotificationBase)):
                continue
            LOG.debug(
                'Event types from %(name)s: %(type)s'
                ' (ack_on_error=%(error)s)', {
                    'name': ext.name,
                    'type': ', '.join(handler.event_types),
                    'error': ack_on_error
                })
            # NOTE(gordc): this could be a set check but oslo_messaging issue
            # https://bugs.launchpad.net/oslo.messaging/+bug/1398511
            # This ensures we don't create multiple duplicate consumers.
            for new_tar in handler.get_targets(cfg.CONF):
                if new_tar not in targets:
                    targets.append(new_tar)
            endpoints.append(handler)

        urls = cfg.CONF.notification.messaging_urls or [None]
        for url in urls:
            transport = messaging.get_transport(url)
            listener = messaging.get_batch_notification_listener(
                transport,
                targets,
                endpoints,
                batch_size=cfg.CONF.notification.batch_size,
                batch_timeout=cfg.CONF.notification.batch_timeout)
            listener.start()
            self.listeners.append(listener)
Example #32
0
 def publish_events(self, events):
     if events:
         for p in self.publishers:
             try:
                 p.publish_events(events)
             except Exception:
                 LOG.exception(
                     _("Pipeline %(pipeline)s: %(status)s"
                       " after error from publisher %(pub)s") %
                     ({
                         'pipeline': self,
                         'status':
                         'Continue' if self.multi_publish else 'Exit',
                         'pub': p
                     }))
                 if not self.multi_publish:
                     raise
Example #33
0
    def get_all(self, q=None, limit=None):
        """Return all known samples, based on the data recorded so far.

        :param q: Filter rules for the samples to be returned.
        :param limit: Maximum number of samples to be returned.
        """

        rbac.enforce('get_samples', pecan.request)

        q = q or []

        if limit and limit < 0:
            raise base.ClientSideError(_("Limit must be positive"))
        kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
        f = storage.SampleFilter(**kwargs)
        return map(Sample.from_db_model,
                   pecan.request.storage_conn.get_samples(f, limit=limit))
Example #34
0
    def update_resource(self, resource_type, resource_id, resource_extra):
        r = self._session.patch(
            "%s/v1/resource/%s/%s" %
            (self._gnocchi_url, resource_type, resource_id),
            headers=self._get_headers(),
            data=json.dumps(resource_extra))

        if r.status_code // 100 != 2:
            raise UnexpectedError(
                _("Resource %(resource_id)s update failed with "
                  "status: %(status_code)d: %(msg)s") % {
                      'resource_id': resource_id,
                      'status_code': r.status_code,
                      'msg': r.text
                  })
        else:
            LOG.debug("Resource %s updated", resource_id)
Example #35
0
    def __init__(self, source=None, target=None, **kwargs):
        """Initialize transformer with configured parameters.

        :param source: dict containing source sample unit
        :param target: dict containing target sample name, type,
                       unit and scaling factor (a missing value
                       connotes no change)
        """
        source = source or {}
        target = target or {}
        self.source = source
        self.target = target
        self.scale = target.get('scale')
        LOG.debug(_('scaling conversion transformer with source:'
                    ' %(source)s target: %(target)s:')
                  % {'source': source,
                     'target': target})
        super(ScalingTransformer, self).__init__(**kwargs)
Example #36
0
    def _setup_transformers(self, cfg, transformer_manager):
        transformers = []
        for transformer in self.transformer_cfg:
            parameter = transformer['parameters'] or {}
            try:
                ext = transformer_manager[transformer['name']]
            except KeyError:
                raise PipelineException(
                    "No transformer named %s loaded" % transformer['name'],
                    cfg)
            transformers.append(ext.plugin(**parameter))
            LOG.info(_(
                "Pipeline %(pipeline)s: Setup transformer instance %(name)s "
                "with parameter %(param)s") % ({'pipeline': self,
                                                'name': transformer['name'],
                                                'param': parameter}))

        return transformers
Example #37
0
    def _update_resource(self, resource_type, resource_id,
                         resource_attributes):
        r = self.gnocchi_api.patch(
            "%s/v1/resource/%s/%s" %
            (self.gnocchi_url, resource_type, resource_id),
            headers=self._get_headers(),
            data=json.dumps(resource_attributes))

        if int(r.status_code / 100) != 2:
            raise UnexpectedWorkflowError(
                _("Resource %(resource_id)s update failed with "
                  "status: %(status_code)d: %(msg)s") % {
                      'resource_id': resource_id,
                      'status_code': r.status_code,
                      'msg': r.text
                  })
        else:
            LOG.debug("Resource %s updated", resource_id)
Example #38
0
    def authenticate(self):
        path = '/authenticate'
        data = {
            'username': self.username,
            'password': self.password,
            'domain': self.domain
        }

        req_params = self._get_req_params(data=data)
        url = urlparse.urljoin(self.endpoint, path)
        resp = requests.post(url, **req_params)
        if resp.status_code != 302:
            raise OpencontrailAPIFailed(
                _('Opencontrail API returned %(status)s %(reason)s') % {
                    'status': resp.status_code,
                    'reason': resp.reason
                })
        self.sid = resp.cookies['connect.sid']
Example #39
0
    def request(self, path, fqdn_uuid, data=None):
        req_data = copy.copy(self.data)
        if data:
            req_data.update(data)

        req_params = self._get_req_params(data=req_data)

        url = urlparse.urljoin(self.endpoint, path + fqdn_uuid)
        self._log_req(url, req_params)
        resp = requests.get(url, **req_params)
        self._log_res(resp)

        if resp.status_code != 200:
            raise OpencontrailAPIFailed(
                _('Opencontrail API returned %(status)s %(reason)s') %
                {'status': resp.status_code, 'reason': resp.reason})

        return resp
Example #40
0
    def _get_connection_pool(conf):
        """Return a connection pool to the database.

        .. note::

          The tests use a subclass to override this and return an
          in-memory connection pool.
        """
        LOG.debug(
            _('connecting to HBase on %(host)s:%(port)s') %
            ({
                'host': conf['host'],
                'port': conf['port']
            }))
        return happybase.ConnectionPool(size=100,
                                        host=conf['host'],
                                        port=conf['port'],
                                        table_prefix=conf['table_prefix'])
Example #41
0
 def __init__(self, target=None, **kwargs):
     super(ArithmeticTransformer, self).__init__(**kwargs)
     target = target or {}
     self.target = target
     self.expr = target.get('expr', '')
     self.expr_escaped, self.escaped_names = self.parse_expr(self.expr)
     self.required_meters = list(self.escaped_names.values())
     self.misconfigured = len(self.required_meters) == 0
     if not self.misconfigured:
         self.reference_meter = self.required_meters[0]
         # convert to set for more efficient contains operation
         self.required_meters = set(self.required_meters)
         self.cache = collections.defaultdict(dict)
         self.latest_timestamp = None
     else:
         LOG.warn(
             _('Arithmetic transformer must use at least one'
               ' meter in expression \'%s\''), self.expr)
Example #42
0
    def get_one(self, resource_id):
        """Retrieve details about one resource.

        :param resource_id: The UUID of the resource.
        """
        rbac.enforce('get_resource', pecan.request)
        # In case we have special character in resource id, for example, swift
        # can generate samples with resource id like
        # 29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance
        resource_id = urllib.parse.unquote(resource_id)

        authorized_project = rbac.get_limited_to_project(pecan.request.headers)
        resources = list(pecan.request.storage_conn.get_resources(
            resource=resource_id, project=authorized_project))
        if not resources:
            raise base.EntityNotFound(_('Resource'), resource_id)
        return Resource.from_db_and_links(resources[0],
                                          self._resource_links(resource_id))
Example #43
0
    def discover(self, manager, param=None):
        """Discover resources to monitor.

        instance_get_all will return all instances if last_run is None,
        and will return only the instances changed since the last_run time.
        """
        try:
            instances = self.nova_cli.instance_get_all(self.last_run)
        except Exception:
            # NOTE(zqfan): instance_get_all is wrapped and will log the
            # exception when there is any error. There is no need to raise it
            # again and log it one more time.
            return []

        for instance in instances:
            if getattr(instance, 'OS-EXT-STS:vm_state',
                       None) in ['deleted', 'error']:
                self.instances.pop(instance.id, None)
            else:
                self.instances[instance.id] = instance
        self.last_run = timeutils.utcnow(True).isoformat()

        resources = []
        for instance in self.instances.values():
            try:
                ip_address = self._address(instance, 'addr')
                final_address = self._make_resource_url(ip_address)

                resource = {
                    'resource_id': instance.id,
                    'resource_url': final_address,
                    'mac_addr': self._address(instance,
                                              'OS-EXT-IPS-MAC:mac_addr'),
                    'image_id': instance.image['id'],
                    'flavor_id': instance.flavor['id']
                }

                resources.append(resource)
            except KeyError:
                LOG.error(
                    _("Couldn't obtain IP address of "
                      "instance %s") % instance.id)

        return resources
Example #44
0
 def get_samples(self, manager, cache, resources):
     self._inspection_duration = self._record_poll_time()
     self.inspector.purge_inspection_cache()
     for instance in resources:
         instance_name = util.instance_name(instance)
         LOG.debug('checking net info for instance %s', instance.id)
         try:
             vnics = self._get_vnics_for_instance(
                 cache,
                 self.inspector,
                 instance,
             )
             for vnic, info in vnics:
                 LOG.debug(self.NET_USAGE_MESSAGE, instance_name, vnic.name,
                           self._get_rx_info(info), self._get_tx_info(info))
                 yield self._get_sample(instance, vnic, info)
         except virt_inspector.NoDataException:
             LOG.debug(
                  'Inspector does not have enough data for the %(pollster)s '
                 'pollster.', {'pollster': self.__class__.__name__})
         except virt_inspector.InstanceNotFoundException as err:
             # Instance was deleted while getting samples. Ignore it.
             LOG.debug('Exception while getting samples %s', err)
         except virt_inspector.InstanceShutOffException as e:
             LOG.debug(
                 'Instance %(instance_id)s was shut off while '
                 'getting samples of %(pollster)s: %(exc)s', {
                     'instance_id': instance.id,
                     'pollster': self.__class__.__name__,
                     'exc': e
                 })
         except ceilometer.NotImplementedError:
             # Selected inspector does not implement this pollster.
             LOG.debug(
                 '%(inspector)s does not provide data for '
                 ' %(pollster)s', {
                     'inspector': self.inspector.__class__.__name__,
                     'pollster': self.__class__.__name__
                 })
         except Exception as err:
             LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {
                 'name': instance_name,
                 'error': err
             })
Example #45
0
    def connect(self, url):
        connection_options = pymongo.uri_parser.parse_uri(url)
        del connection_options['database']
        del connection_options['username']
        del connection_options['password']
        del connection_options['collection']
        pool_key = tuple(connection_options)

        if pool_key in self._pool:
            client = self._pool.get(pool_key)()
            if client:
                return client
        splitted_url = netutils.urlsplit(url)
        log_data = {'db': splitted_url.scheme,
                    'nodelist': connection_options['nodelist']}
        LOG.info(_('Connecting to %(db)s on %(nodelist)s') % log_data)
        client = self._mongo_connect(url)
        self._pool[pool_key] = weakref.ref(client)
        return client
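
Note: the pool above holds only weak references, so unused clients can be garbage-collected; a stripped-down sketch with a generic factory. The ClientPool name and factory are hypothetical, and clients must be weak-referenceable (instances of plain classes are).

    import weakref

    class ClientPool(object):
        def __init__(self, factory):
            self._factory = factory  # callable creating a client for a key
            self._pool = {}

        def get(self, pool_key):
            ref = self._pool.get(pool_key)
            if ref is not None:
                client = ref()          # dereference the weak reference
                if client is not None:  # still alive: reuse it
                    return client
            client = self._factory(pool_key)
            self._pool[pool_key] = weakref.ref(client)
            return client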
Example #46
0
    def __init__(self, conf, parsed_url):
        self.conf = conf
        self._retry_interval = conf.monasca.client_retry_interval
        self._max_retries = conf.monasca.client_max_retries or 1
        self._enable_api_pagination = conf.monasca.enable_api_pagination
        # NOTE(zqfan): There are many concurrent requests when using
        # Ceilosca; to save system resources, we don't retry too many times.
        if self._max_retries < 0 or self._max_retries > 10:
            LOG.warning('Reduce max retries from %s to 10', self._max_retries)
            self._max_retries = 10

        monasca_auth_group = conf.monasca.auth_section
        session = keystone_client.get_session(conf, group=monasca_auth_group)

        self._endpoint = parsed_url.netloc + parsed_url.path
        LOG.info(
            _("monasca_client: using %s as Monasca endpoint") % self._endpoint)

        self._get_client(session)
Example #47
0
    def notify(action, alarm_id, alarm_name, severity, previous,
               current, reason, reason_data, headers=None):
        headers = headers or {}
        if not headers.get('x-openstack-request-id'):
            headers['x-openstack-request-id'] = context.generate_request_id()

        LOG.info(_(
            "Notifying alarm %(alarm_name)s %(alarm_id)s with severity"
            " %(severity)s from %(previous)s to %(current)s with action "
            "%(action)s because %(reason)s. request-id: %(request_id)s") %
            {'alarm_name': alarm_name, 'alarm_id': alarm_id,
             'severity': severity, 'previous': previous,
             'current': current, 'action': action, 'reason': reason,
             'request_id': headers['x-openstack-request-id']})
        body = {'alarm_name': alarm_name, 'alarm_id': alarm_id,
                'severity': severity, 'previous': previous,
                'current': current, 'reason': reason,
                'reason_data': reason_data}
        headers['content-type'] = 'application/json'
        kwargs = {'data': jsonutils.dumps(body),
                  'headers': headers}

        if action.scheme == 'https':
            default_verify = int(cfg.CONF.alarm.rest_notifier_ssl_verify)
            options = urlparse.parse_qs(action.query)
            verify = bool(int(options.get('ceilometer-alarm-ssl-verify',
                                          [default_verify])[-1]))
            kwargs['verify'] = verify

            cert = cfg.CONF.alarm.rest_notifier_certificate_file
            key = cfg.CONF.alarm.rest_notifier_certificate_key
            if cert:
                kwargs['cert'] = (cert, key) if key else cert

        # FIXME(rhonjo): Retries are automatically done by urllib3 in requests
        # library. However, there's no interval between retries in urllib3
        # implementation. It will be better to put some interval between
        # retries (future work).
        max_retries = cfg.CONF.alarm.rest_notifier_max_retries
        session = requests.Session()
        session.mount(action.geturl(),
                      requests.adapters.HTTPAdapter(max_retries=max_retries))
        eventlet.spawn_n(session.post, action.geturl(), **kwargs)
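
The per-action override works by reading a ceilometer-alarm-ssl-verify query option from the action URL and falling back to the configured default. A hedged standalone illustration of that parsing (the URL is hypothetical):

    from six.moves.urllib import parse as urlparse

    action = urlparse.urlsplit(
        'https://hooks.example.com/alarm?ceilometer-alarm-ssl-verify=0')
    options = urlparse.parse_qs(action.query)
    # Fall back to a default of 1 (verify) when the option is absent.
    verify = bool(int(options.get('ceilometer-alarm-ssl-verify', ['1'])[-1]))
    print(verify)  # False: this action URL disabled certificate verification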
Example #48
    def notify(self, alarm, previous, reason, reason_data):
        actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state])
        if not actions:
            LOG.debug('alarm %(alarm_id)s has no action configured '
                      'for state transition from %(previous)s to '
                      'state %(state)s, skipping the notification.',
                      {'alarm_id': alarm.alarm_id,
                       'previous': previous,
                       'state': alarm.state})
            return
        self.client.cast(context.get_admin_context(),
                         'notify_alarm', data={
                             'actions': actions,
                             'alarm_id': alarm.alarm_id,
                             'alarm_name': alarm.name,
                             'previous': previous,
                             'current': alarm.state,
                             'reason': six.text_type(reason),
                             'reason_data': reason_data})
Example #49
    def _create_resource(self, resource_type, resource_id,
                         resource_attributes):
        r = self.gnocchi_api.post("%s/v1/resource/%s"
                                  % (self.gnocchi_url, resource_type),
                                  headers=self._get_headers(),
                                  data=json.dumps(resource_attributes))
        if r.status_code == 409:
            LOG.debug("Resource %s already exists", resource_id)
            raise ResourceAlreadyExists

        elif r.status_code // 100 != 2:
            raise UnexpectedWorkflowError(
                _("Resource %(resource_id)s creation failed with "
                  "status: %(status_code)d: %(msg)s") %
                {'resource_id': resource_id,
                 'status_code': r.status_code,
                 'msg': r.text})
        else:
            LOG.debug("Resource %s created", resource_id)
Example #50
    def _lookup_by_uuid(self, instance):
        instance_name = util.instance_name(instance)
        try:
            return self.connection.lookupByUUIDString(instance.id)
        except libvirt.libvirtError as ex:
            if libvirt_utils.is_disconnection_exception(ex):
                # Let disconnection errors propagate so the caller can retry.
                raise
            msg = _("Error from libvirt while looking up instance "
                    "<name=%(name)s, id=%(id)s>: "
                    "[Error Code %(error_code)s] "
                    "%(ex)s") % {
                        'name': instance_name,
                        'id': instance.id,
                        'error_code': ex.get_error_code(),
                        'ex': ex
                    }
            raise virt_inspector.InstanceNotFoundException(msg)
        except Exception as ex:
            raise virt_inspector.InspectorException(six.text_type(ex))
Example #51
    def _transform_sample(self, start, ctxt, sample):
        try:
            for transformer in self.transformers[start:]:
                sample = transformer.handle_sample(ctxt, sample)
                if not sample:
                    LOG.debug(
                        "Pipeline %(pipeline)s: Sample dropped by "
                        "transformer %(trans)s", {'pipeline': self,
                                                  'trans': transformer})
                    return
            return sample
        except Exception as err:
            # TODO(gordc): only use one log level.
            LOG.warning(_("Pipeline %(pipeline)s: "
                          "Exit after error from transformer "
                          "%(trans)s for %(smp)s"), {'pipeline': self,
                                                     'trans': transformer,
                                                     'smp': sample})
            LOG.exception(err)
Example #52
    def create_resource(self, resource_type, resource):
        r = self._session.post("%s/v1/resource/%s"
                               % (self._gnocchi_url, resource_type),
                               headers=self._get_headers(),
                               data=json.dumps(resource))

        if r.status_code == 409:
            LOG.debug("Resource %s already exists", resource['id'])
            raise ResourceAlreadyExists

        elif r.status_code // 100 != 2:
            raise UnexpectedError(
                _("Resource %(resource_id)s creation failed with "
                  "status: %(status_code)d: %(msg)s") %
                {'resource_id': resource['id'],
                 'status_code': r.status_code,
                 'msg': r.text})
        else:
            LOG.debug("Resource %s created", resource['id'])
Example #53
    def get_all(self, q=None, limit=None):
        """Return samples for the meter.

        :param q: Filter rules for the data to be returned.
        :param limit: Maximum number of samples to return.
        """

        rbac.enforce('get_samples', pecan.request)

        q = q or []
        if limit and limit < 0:
            raise base.ClientSideError(_("Limit must be positive"))
        kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__)
        kwargs['meter'] = self.meter_name
        f = storage.SampleFilter(**kwargs)
        return [
            OldSample.from_db_model(e)
            for e in pecan.request.storage_conn.get_samples(f, limit=limit)
        ]
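
For illustration, how a client might call this endpoint with a filter; the host, port, meter name, and token below are hypothetical, while q.field/q.op/q.value is the query convention this API expects:

    import requests

    # Hypothetical endpoint: fetch up to 10 samples of one meter,
    # filtered to a single resource.
    resp = requests.get(
        'http://localhost:8777/v2/meters/cpu_util',
        params={'q.field': 'resource_id', 'q.op': 'eq',
                'q.value': 'inst-0001', 'limit': 10},
        headers={'X-Auth-Token': 'a-keystone-token'})
    print(resp.json())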
Example #54
    def get_targets(self, conf):
        """Return a sequence of oslo.messaging.Target.

        Sequence is defining the exchange and topics to be connected for this
        plugin.
        :param conf: Configuration.
        """

        # TODO(sileht): Backwards compatibility, remove in J+2
        if hasattr(self, 'get_exchange_topics'):
            LOG.warn(_('get_exchange_topics API of NotificationPlugin is '
                       'deprecated; implement get_targets instead.'))

            targets = []
            for exchange, topics in self.get_exchange_topics(conf):
                targets.extend(oslo_messaging.Target(topic=topic,
                                                     exchange=exchange)
                               for topic in topics)
            return targets
Example #55
    def handle_sample(self, s):
        """Handle a sample, converting if necessary."""
        # LOG.debug('handling sample %s', s)
        key = s.name + s.resource_id
        prev = self.cache.get(key)
        timestamp = timeutils.parse_isotime(s.timestamp)
        self.cache[key] = (s.volume, timestamp, s.monotonic_time)

        if prev:
            prev_volume = prev[0]
            prev_timestamp = prev[1]
            prev_monotonic_time = prev[2]
            if (prev_monotonic_time is not None and
                    s.monotonic_time is not None):
                # NOTE(sileht): Prefer high precision timer
                time_delta = s.monotonic_time - prev_monotonic_time
            else:
                time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
            # disallow violations of the arrow of time
            if time_delta < 0:
                LOG.warning(_('dropping out of time order sample: %s'), (s,))
                # Reset the cache to the newer sample.
                self.cache[key] = prev
                return None
            # we only allow negative volume deltas for noncumulative
            # samples, whereas for cumulative we assume that a reset has
            # occurred in the interim so that the current volume gives a
            # lower bound on growth
            volume_delta = (s.volume - prev_volume
                            if (prev_volume <= s.volume or
                                s.type != sample.TYPE_CUMULATIVE)
                            else s.volume)
            rate_of_change = ((1.0 * volume_delta / time_delta)
                              if time_delta else 0.0)

            s = self._convert(s, rate_of_change)
            # LOG.debug('converted to: %s', s)
        else:
            # LOG.warning(_('dropping sample with no predecessor: %s'),
            #            (s,))
            s = None
        return s
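
A worked check of the delta logic above, with hypothetical numbers: a cumulative counter going 1000 to 4000 over 60 seconds yields a rate of 50.0 units/s, while a drop to a lower value is read as a counter reset and the new absolute volume becomes the (lower-bound) delta:

    # Standalone restatement of the volume_delta rule (values hypothetical).
    TYPE_CUMULATIVE = 'cumulative'

    def volume_delta(prev_volume, volume, sample_type):
        # Negative deltas on cumulative meters mean the counter was reset;
        # the new absolute volume is a lower bound on real growth.
        if prev_volume <= volume or sample_type != TYPE_CUMULATIVE:
            return volume - prev_volume
        return volume

    assert volume_delta(1000, 4000, TYPE_CUMULATIVE) == 3000
    assert volume_delta(4000, 1000, TYPE_CUMULATIVE) == 1000  # reset
    print(3000 / 60.0)  # 50.0 units/s over a 60s window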
Example #56
    def _configure_pipeline_listeners(self):
        self.pipeline_listeners = []
        ev_pipes = []
        if cfg.CONF.notification.store_events:
            ev_pipes = self.event_pipeline_manager.pipelines
        # Only listen to the subset of pipelines this agent owns, as
        # decided by the partition coordinator.
        partitioned = self.partition_coordinator.extract_my_subset(
            self.group_id, self.pipeline_manager.pipelines + ev_pipes)
        transport = messaging.get_transport()
        for pipe in partitioned:
            LOG.debug('Pipeline endpoint: %s', pipe.name)
            pipe_endpoint = (pipeline.EventPipelineEndpoint if isinstance(
                pipe, pipeline.EventPipeline) else
                             pipeline.SamplePipelineEndpoint)
            listener = messaging.get_notification_listener(
                transport, [
                    oslo_messaging.Target(topic='%s-%s' %
                                          (self.NOTIFICATION_IPC, pipe.name))
                ], [pipe_endpoint(self.ctxt, pipe)])
            listener.start()
            self.pipeline_listeners.append(listener)
Example #57
    def publish_samples(self, samples):
        """Send a metering message for publishing

        :param samples: Samples from pipeline after transformation
        """

        for sample in samples:
            msg = utils.meter_message_from_counter(
                sample, cfg.CONF.publisher.telemetry_secret)
            host = self.host
            port = self.port
            LOG.debug("Publishing sample %(msg)s over UDP to "
                      "%(host)s:%(port)d", {'msg': msg, 'host': host,
                                            'port': port})
            try:
                self.socket.sendto(msgpack.dumps(msg), (host, port))
            except Exception as e:
                LOG.warning(_("Unable to send sample over UDP"))
                LOG.exception(e)
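
A hedged counterpart to the publisher above: a minimal receiver that unpacks these datagrams (the bind address and port are placeholders; match them to the publisher's configuration):

    import socket

    import msgpack

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('0.0.0.0', 4952))  # hypothetical port; use the publisher's

    while True:
        data, addr = sock.recvfrom(64 * 1024)
        # Each datagram carries one msgpack-encoded meter message.
        print(addr, msgpack.loads(data))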
Example #58
    def __init__(self, cfg):
        """Setup the polling according to config.

        The configuration is the sources half of the Pipeline Config.
        """
        self.sources = []
        if not ('sources' in cfg and 'sinks' in cfg):
            raise PipelineException("Both sources & sinks are required", cfg)
        LOG.info(_('detected decoupled pipeline config format'))

        unique_names = set()
        for s in cfg.get('sources', []):
            name = s.get('name')
            if name in unique_names:
                raise PipelineException("Duplicated source names: %s" % name,
                                        self)
            else:
                unique_names.add(name)
                self.sources.append(SampleSource(s))
        unique_names.clear()
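
For reference, a minimal sources/sinks mapping of the shape this constructor checks for; the names, interval, and publisher below are illustrative only:

    cfg = {
        'sources': [{
            'name': 'meter_source',
            'interval': 600,
            'meters': ['*'],
            'sinks': ['meter_sink'],
        }],
        'sinks': [{
            'name': 'meter_sink',
            'transformers': [],
            'publishers': ['notifier://'],
        }],
    }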
Example #59
    def inspect_vnics(self, instance):
        instance_name = util.instance_name(instance)
        domain = self._lookup_by_uuid(instance)
        state = domain.info()[0]
        if state == libvirt.VIR_DOMAIN_SHUTOFF:
            LOG.warn(
                _('Failed to inspect vnics of instance Name '
                  '%(instance_name)s UUID %(instance_uuid)s, '
                  'domain is in state of SHUTOFF'), {
                      'instance_name': instance_name,
                      'instance_uuid': instance.id
                  })
            return
        tree = etree.fromstring(domain.XMLDesc(0))
        for iface in tree.findall('devices/interface'):
            target = iface.find('target')
            if target is None:
                continue
            name = target.get('dev')
            mac = iface.find('mac')
            if mac is None:
                continue
            mac_address = mac.get('address')
            fref = iface.find('filterref')
            if fref is not None:
                fref = fref.get('filter')

            params = dict((p.get('name').lower(), p.get('value'))
                          for p in iface.findall('filterref/parameter'))
            interface = virt_inspector.Interface(name=name,
                                                 mac=mac_address,
                                                 fref=fref,
                                                 parameters=params)
            dom_stats = domain.interfaceStats(name)
            # interfaceStats() returns an 8-tuple; the rx counters are at
            # indexes 0-3 and the tx counters start at index 4.
            stats = virt_inspector.InterfaceStats(rx_bytes=dom_stats[0],
                                                  rx_packets=dom_stats[1],
                                                  tx_bytes=dom_stats[4],
                                                  tx_packets=dom_stats[5])
            yield (interface, stats)
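
For context, a hedged sketch of the libvirt interface XML fragment this parser walks; the device names, MAC, and filter values are illustrative:

    from lxml import etree

    xml = """<domain>
      <devices>
        <interface type='bridge'>
          <mac address='fa:16:3e:00:00:01'/>
          <target dev='tap0'/>
          <filterref filter='nova-instance-xyz'>
            <parameter name='IP' value='192.168.0.10'/>
          </filterref>
        </interface>
      </devices>
    </domain>"""

    tree = etree.fromstring(xml)
    for iface in tree.findall('devices/interface'):
        print(iface.find('target').get('dev'))        # tap0
        print(iface.find('mac').get('address'))       # fa:16:3e:00:00:01
        print(iface.find('filterref').get('filter'))  # nova-instance-xyz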
Example #60
    def process_notification(self, message):
        LOG.debug('Image notification %r', message)
        user_id = message['payload']['user_id']
        tenant_id = message['payload']['tenant_id']
        res_id = '%s_%s' % (message['payload']['image_meta']['base_image_ref'],
                            message['payload']['instance_id'])
        res_name = message['payload']['image_meta']['image_name']
        res_type = 'image'
        # same as the instance message_id
        message_id = message['message_id']
        timestamp = message['timestamp']
        event_type = message['event_type']
        res_meta = {
            'os_type': message['payload']['os_type'],
            'architecture': message['payload']['architecture'],
            'image_ref_url': message['payload']['image_ref_url'],
            'min_disk': message['payload']['image_meta']['min_disk'],
            'container_format':
                message['payload']['image_meta']['container_format'],
            'min_ram': message['payload']['image_meta']['min_ram'],
            'disk_format': message['payload']['image_meta']['disk_format'],
            'base_image_ref':
                message['payload']['image_meta']['base_image_ref']
        }
        return {
            'message_id': message_id,
            'res_id': res_id,
            'res_name': res_name,
            'res_meta': res_meta,
            'res_type': res_type,
            'event_type': event_type,
            'timestamp': timestamp,
            'user_id': user_id,
            'project_id': tenant_id
        }
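
For orientation, an illustrative notification of the shape this handler consumes; every value below is hypothetical, and only the keys actually read above are included:

    message = {
        'message_id': 'msg-0001',
        'timestamp': '2015-01-01T00:00:00Z',
        'event_type': 'compute.instance.create.end',
        'payload': {
            'user_id': 'user-0001',
            'tenant_id': 'tenant-0001',
            'instance_id': 'inst-0001',
            'os_type': 'linux',
            'architecture': 'x86_64',
            'image_ref_url': 'http://glance.example.com/images/img-0001',
            'image_meta': {
                'base_image_ref': 'img-0001',
                'image_name': 'cirros',
                'min_disk': 0,
                'min_ram': 0,
                'container_format': 'bare',
                'disk_format': 'qcow2',
            },
        },
    }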