Example #1
 def treat_metric(self, metric_name, metric_type, value, sampling):
     metric_name += "|" + metric_type
     if metric_type == "ms":
         if sampling is not None:
             raise ValueError(
                 "Invalid sampling for ms: `%d`, should be none"
                 % sampling)
         self.times[metric_name] = storage.Measure(
             utils.utcnow(), value)
     elif metric_type == "g":
         if sampling is not None:
             raise ValueError(
                 "Invalid sampling for g: `%d`, should be none"
                 % sampling)
         self.gauges[metric_name] = storage.Measure(
             utils.utcnow(), value)
     elif metric_type == "c":
         sampling = 1 if sampling is None else sampling
         if metric_name in self.counters:
             current_value = self.counters[metric_name].value
         else:
             current_value = 0
         self.counters[metric_name] = storage.Measure(
             utils.utcnow(),
             current_value + (value * (1 / sampling)))
     # TODO(jd) Support "set" type
     # elif metric_type == "s":
     #     pass
     else:
         raise ValueError("Unknown metric type `%s'" % metric_type)
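The treat_metric() handlers in examples like this one receive arguments that, in a statsd-style pipeline, come from parsing datagrams such as "hits:1|c|@0.1". Below is a minimal, self-contained sketch of that parsing step; the helper name parse_statsd_line() and its return shape are illustrative assumptions, not part of the project's API.

def parse_statsd_line(line):
    """Split a statsd datagram into (name, value, type, sampling).

    For example "hits:1|c|@0.1" -> ("hits", 1.0, "c", 0.1)
    and "cpu:42|g" -> ("cpu", 42.0, "g", None).
    """
    metric_name, _, rest = line.partition(":")
    parts = rest.split("|")
    value = float(parts[0])
    metric_type = parts[1]
    sampling = None
    if len(parts) > 2 and parts[2].startswith("@"):
        sampling = float(parts[2][1:])
    return metric_name, value, metric_type, sampling


# The resulting tuple maps onto the handler signature above, roughly:
#   stats.treat_metric(metric_name, metric_type, value, sampling)
print(parse_statsd_line("hits:1|c|@0.1"))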
Example #2
class ResourceMixin(ResourceJsonifier):
    @declarative.declared_attr
    def __table_args__(cls):
        return (sqlalchemy.CheckConstraint('started_at <= ended_at',
                                           name="ck_started_before_ended"),
                COMMON_TABLES_ARGS)

    @declarative.declared_attr
    def type(cls):
        return sqlalchemy.Column(
            sqlalchemy.String(255),
            sqlalchemy.ForeignKey('resource_type.name',
                                  ondelete="RESTRICT",
                                  name="fk_%s_resource_type_name" %
                                  cls.__tablename__),
            nullable=False)

    creator = sqlalchemy.Column(sqlalchemy.String(255))
    started_at = sqlalchemy.Column(TimestampUTC,
                                   nullable=False,
                                   default=lambda: utils.utcnow())
    revision_start = sqlalchemy.Column(TimestampUTC,
                                       nullable=False,
                                       default=lambda: utils.utcnow())
    ended_at = sqlalchemy.Column(TimestampUTC)
    user_id = sqlalchemy.Column(sqlalchemy.String(255))
    project_id = sqlalchemy.Column(sqlalchemy.String(255))
    original_resource_id = sqlalchemy.Column(sqlalchemy.String(255),
                                             nullable=False)
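Both ResourceMixin variants in this collection lean on declarative.declared_attr so that per-subclass values (such as cls.__tablename__ inside the foreign-key name) are computed once for each mapped class. A minimal, self-contained sketch of that SQLAlchemy pattern, with invented model names rather than Gnocchi's:

import datetime

import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base, declared_attr

Base = declarative_base()


class TimestampedMixin(object):
    # declared_attr is evaluated per mapped subclass, so cls differs each time.
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    started_at = sqlalchemy.Column(sqlalchemy.DateTime, nullable=False,
                                   default=datetime.datetime.utcnow)


class Sensor(TimestampedMixin, Base):
    name = sqlalchemy.Column(sqlalchemy.String(255))


# The mixin's columns and the computed table name land on the subclass.
print(Sensor.__table__.name)    # "sensor"
print([c.name for c in Sensor.__table__.columns])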
Example #3
 def treat_metric(self, metric_name, metric_type, value, sampling):
     metric_name += "|" + metric_type
     if metric_type == "ms":
         if sampling is not None:
             raise ValueError(
                 "Invalid sampling for ms: `%d`, should be none"
                 % sampling)
         self.times[metric_name] = storage.Measure(
             utils.dt_in_unix_ns(utils.utcnow()), value)
     elif metric_type == "g":
         if sampling is not None:
             raise ValueError(
                 "Invalid sampling for g: `%d`, should be none"
                 % sampling)
         self.gauges[metric_name] = storage.Measure(
             utils.dt_in_unix_ns(utils.utcnow()), value)
     elif metric_type == "c":
         sampling = 1 if sampling is None else sampling
         if metric_name in self.counters:
             current_value = self.counters[metric_name].value
         else:
             current_value = 0
         self.counters[metric_name] = storage.Measure(
             utils.dt_in_unix_ns(utils.utcnow()),
             current_value + (value * (1 / sampling)))
     # TODO(jd) Support "set" type
     # elif metric_type == "s":
     #     pass
     else:
         raise ValueError("Unknown metric type `%s'" % metric_type)
Example #4
    def treat_metric(self, host, metric_name, metric_type,
                     value):
        """Collectd.

        Statistics in collectd consist of a value list. A value list includes:
        Values, which can be one of:
        Derive: used for values where a change in the value since it was last
        read is of interest. Can be used to calculate and store a rate.
        Counter: similar to derive values, but takes the possibility of a
        counter wrap-around into consideration.
        Gauge: used for values that are stored as is.
        Absolute: used for counters that are reset after reading.

        """

        if metric_type == "absolute":
            if host not in self.absolute:
                self.absolute[host] = {}
            self.absolute[host][metric_name] = incoming.Measure(
                utils.dt_in_unix_ns(utils.utcnow()), value)
        elif metric_type == "gauge":
            if host not in self.gauges:
                self.gauges[host] = {}
            self.gauges[host][metric_name] = incoming.Measure(
                utils.dt_in_unix_ns(utils.utcnow()), value)
        elif metric_type == "counter" or metric_type == "derive":
            if host not in self.counters:
                self.counters[host] = {}
            self.counters[host][metric_name] = incoming.Measure(
                utils.dt_in_unix_ns(utils.utcnow()), value)
        else:
            raise ValueError("Unknown metric type '%s'" % metric_type)
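The per-host bookkeeping above (the repeated "if host not in self.gauges: self.gauges[host] = {}") is the usual nested-dictionary pattern; collections.defaultdict expresses the same idea more compactly. A small self-contained sketch, with names invented for illustration and a plain (timestamp, value) tuple standing in for incoming.Measure:

import collections
import datetime

# One mapping per collectd value type, each keyed by host then metric name.
gauges = collections.defaultdict(dict)
counters = collections.defaultdict(dict)


def record(host, metric_name, metric_type, value):
    now = datetime.datetime.utcnow()
    measure = (now, value)          # stand-in for incoming.Measure
    if metric_type == "gauge":
        gauges[host][metric_name] = measure
    elif metric_type in ("counter", "derive"):
        counters[host][metric_name] = measure
    else:
        raise ValueError("Unknown metric type '%s'" % metric_type)


record("web-1", "cpu.idle", "gauge", 97.5)
print(gauges["web-1"]["cpu.idle"])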
Example #5
class ResourceType(Base, GnocchiBase, resource_type.ResourceType):
    __tablename__ = 'resource_type'
    __table_args__ = (
        sqlalchemy.UniqueConstraint("tablename",
                                    name="uniq_resource_type0tablename"),
        COMMON_TABLES_ARGS,
    )

    name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True,
                             nullable=False)
    tablename = sqlalchemy.Column(sqlalchemy.String(35), nullable=False)
    attributes = sqlalchemy.Column(ResourceTypeAttributes)
    state = sqlalchemy.Column(sqlalchemy.Enum("active", "creating",
                                              "creation_error", "deleting",
                                              "deletion_error", "updating",
                                              "updating_error",
                                              name="resource_type_state_enum"),
                              nullable=False,
                              server_default="creating")
    updated_at = sqlalchemy.Column(types.TimestampUTC, nullable=False,
                                   # NOTE(jd): We would like to use
                                   # sqlalchemy.func.now, but we can't
                                   # because the type of PreciseTimestamp in
                                   # MySQL is not a Timestamp, so it would
                                   # not store a timestamp but a date as an
                                   # integer.
                                   default=lambda: utils.utcnow())

    def to_baseclass(self):
        cols = {}
        for attr in self.attributes:
            cols[attr.name] = sqlalchemy.Column(attr.satype,
                                                nullable=not attr.required)
        return type(str("%s_base" % self.tablename), (object, ), cols)
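to_baseclass() builds a plain Python class at runtime with the three-argument form of type(), one attribute per stored resource-type attribute. A standalone sketch of that mechanism, without SQLAlchemy and with invented attribute names:

# type(name, bases, namespace) creates a new class object on the fly.
attributes = ("display_name", "flavor_id", "deleted")

namespace = {name: None for name in attributes}
InstanceBase = type("instance_base", (object,), namespace)

obj = InstanceBase()
obj.display_name = "vm-42"
print(type(obj).__name__, obj.display_name)   # instance_base vm-42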
Example #6
def _inject(inc,
            coord,
            store,
            idx,
            metrics,
            measures,
            archive_policy_name="low",
            process=False,
            interval=None):
    LOG.info("Creating %d metrics", metrics)
    with utils.StopWatch() as sw:
        metric_ids = [
            idx.create_metric(uuid.uuid4(), "admin", archive_policy_name).id
            for _ in range(metrics)
        ]
    LOG.info("Created %d metrics in %.2fs", metrics, sw.elapsed())

    LOG.info("Generating %d measures per metric for %d metrics… ", measures,
             metrics)
    now = numpy.datetime64(utils.utcnow())
    with utils.StopWatch() as sw:
        measures = {
            m_id: [
                incoming.Measure(now + numpy.timedelta64(s, "s"),
                                 random.randint(-999999, 999999))
                for s in range(measures)
            ]
            for m_id in metric_ids
        }
    LOG.info("… done in %.2fs", sw.elapsed())

    interval_timer = utils.StopWatch().start()

    while True:
        interval_timer.reset()
        with utils.StopWatch() as sw:
            inc.add_measures_batch(measures)
        total_measures = sum(map(len, measures.values()))
        LOG.info("Pushed %d measures in %.2fs", total_measures, sw.elapsed())

        if process:
            c = chef.Chef(coord, inc, idx, store)

            with utils.StopWatch() as sw:
                for s in inc.iter_sacks():
                    c.process_new_measures_for_sack(s, blocking=True)
            LOG.info("Processed %d sacks in %.2fs", inc.NUM_SACKS,
                     sw.elapsed())
            LOG.info("Speed: %.2f measures/s",
                     float(total_measures) / sw.elapsed())

        if interval is None:
            break
        time.sleep(max(0, interval - interval_timer.elapsed()))

    return total_measures
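The synthetic timestamps above come from adding a numpy.timedelta64 offset to a numpy.datetime64 anchor. The short, self-contained sketch below reproduces that arithmetic and the surrounding timing, using time.perf_counter() in place of the project's StopWatch helper (assumed here to be a simple elapsed-time wrapper):

import random
import time

import numpy

now = numpy.datetime64("2024-01-01T00:00:00")

start = time.perf_counter()
# One (timestamp, value) pair per second, mirroring the generator above.
measures = [(now + numpy.timedelta64(s, "s"), random.randint(-999999, 999999))
            for s in range(10000)]
elapsed = time.perf_counter() - start

print("Generated %d measures in %.4fs" % (len(measures), elapsed))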
Example #7
    def todo():
        metric = index.create_metric(
            uuid.uuid4(),
            creator=conf.creator,
            archive_policy_name=conf.archive_policy_name)

        for _ in six.moves.range(conf.batch_of_measures):
            measures = [
                incoming.Measure(
                    utils.dt_in_unix_ns(utils.utcnow()), random.random())
                for __ in six.moves.range(conf.measures_per_batch)]
            instore.add_measures(metric, measures)
Example #8
 def on_message(self, event):
     json_message = ujson.loads(event.message.body)
     timestamp = utils.dt_in_unix_ns(utils.utcnow())
     measures_by_host_and_name = sorted(
         ((message["host"], self._serialize_identifier(index,
                                                       message), value)
          for message in json_message
          for index, value in enumerate(message["values"])))
     for (host,
          name), values in itertools.groupby(measures_by_host_and_name,
                                             key=lambda x: x[0:2]):
         measures = (incoming.Measure(timestamp, v[2]) for v in values)
         self.processor.add_measures(host, name, measures)
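on_message() flattens the payload into (host, name, value) tuples, sorts them, and then groups with itertools.groupby on key=lambda x: x[0:2]; the sort is required because groupby only merges adjacent items. A small self-contained sketch of that grouping step with made-up data:

import itertools

rows = [
    ("web-1", "cpu", 10),
    ("web-2", "mem", 70),
    ("web-1", "cpu", 12),
    ("web-1", "mem", 55),
]

# groupby only merges adjacent items, so sort on the grouping key first.
rows.sort(key=lambda x: x[0:2])

for (host, name), values in itertools.groupby(rows, key=lambda x: x[0:2]):
    print(host, name, [v[2] for v in values])
# web-1 cpu [10, 12]
# web-1 mem [55]
# web-2 mem [70]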
Example #9
def _inject(inc, coord, store, idx,
            metrics, measures, archive_policy_name="low", process=False,
            interval=None):
    LOG.info("Creating %d metrics", metrics)
    with utils.StopWatch() as sw:
        metric_ids = [
            idx.create_metric(uuid.uuid4(), "admin",
                              archive_policy_name).id
            for _ in range(metrics)
        ]
    LOG.info("Created %d metrics in %.2fs", metrics, sw.elapsed())

    LOG.info("Generating %d measures per metric for %d metrics… ",
             measures, metrics)
    now = numpy.datetime64(utils.utcnow())
    with utils.StopWatch() as sw:
        measures = {
            m_id: [incoming.Measure(
                now + numpy.timedelta64(s, "s"),
                random.randint(-999999, 999999)) for s in range(measures)]
            for m_id in metric_ids
        }
    LOG.info("… done in %.2fs", sw.elapsed())

    interval_timer = utils.StopWatch().start()

    while True:
        interval_timer.reset()
        with utils.StopWatch() as sw:
            inc.add_measures_batch(measures)
        total_measures = sum(map(len, measures.values()))
        LOG.info("Pushed %d measures in %.2fs",
                 total_measures,
                 sw.elapsed())

        if process:
            c = chef.Chef(coord, inc, idx, store)

            with utils.StopWatch() as sw:
                for s in inc.iter_sacks():
                    c.process_new_measures_for_sack(s, blocking=True)
            LOG.info("Processed %d sacks in %.2fs",
                     inc.NUM_SACKS, sw.elapsed())
            LOG.info("Speed: %.2f measures/s",
                     float(total_measures) / sw.elapsed())

        if interval is None:
            break
        time.sleep(max(0, interval - interval_timer.elapsed()))

    return total_measures
Example #10
    def treat_metric(self, resource_id, metric_name, metric_type, value,
                     sampling):

        if metric_type == "absolute":
            if sampling is not None:
                raise ValueError(
                    "Invalid sampling for ms: `%d`, should be none" % sampling)

            if resource_id not in self.absolute:
                self.absolute[resource_id] = collections.defaultdict(list)

            self.absolute[resource_id][metric_name] = storage.Measure(
                utils.dt_in_unix_ns(utils.utcnow()), value)
        elif metric_type == "gauge":
            if sampling is not None:
                raise ValueError(
                    "Invalid sampling for g: `%d`, should be none" % sampling)
            if resource_id not in self.gauges:
                self.gauges[resource_id] = collections.defaultdict(list)

            self.gauges[resource_id][metric_name] = storage.Measure(
                utils.dt_in_unix_ns(utils.utcnow()), value)
        elif metric_type == "counter":
            sampling = 1 if sampling is None else sampling
            if resource_id not in self.counters:
                self.counters[resource_id] = collections.defaultdict(list)
            if metric_name in self.counters[resource_id]:
                current_value = self.counters[resource_id][metric_name].value
            else:
                current_value = 0
            self.counters[resource_id][metric_name] = storage.Measure(
                utils.dt_in_unix_ns(utils.utcnow()),
                current_value + (value * (1 / sampling)))
        # TODO(jd) Support "set" type
        # elif metric_type == "s":
        #     pass
        else:
            raise ValueError("Unknown metric type `%s'" % metric_type)
Example #11
class ResourceMixin(ResourceJsonifier):
    @declarative.declared_attr
    def __table_args__(cls):
        return (sqlalchemy.CheckConstraint('started_at <= ended_at',
                                           name="ck_started_before_ended"),
                COMMON_TABLES_ARGS)

    @declarative.declared_attr
    def type(cls):
        return sqlalchemy.Column(
            sqlalchemy.String(255),
            sqlalchemy.ForeignKey('resource_type.name',
                                  ondelete="RESTRICT",
                                  name="fk_%s_resource_type_name" %
                                  cls.__tablename__),
            nullable=False)

    created_by_user_id = sqlalchemy.Column(sqlalchemy.String(255))
    created_by_project_id = sqlalchemy.Column(sqlalchemy.String(255))
    started_at = sqlalchemy.Column(
        PreciseTimestamp,
        nullable=False,
        # NOTE(jd): We would like to use
        # sqlalchemy.func.now, but we can't
        # because the type of PreciseTimestamp in
        # MySQL is not a Timestamp, so it would
        # not store a timestamp but a date as an
        # integer.
        default=lambda: utils.utcnow())
    revision_start = sqlalchemy.Column(PreciseTimestamp,
                                       nullable=False,
                                       default=lambda: utils.utcnow())
    ended_at = sqlalchemy.Column(PreciseTimestamp)
    user_id = sqlalchemy.Column(sqlalchemy.String(255))
    project_id = sqlalchemy.Column(sqlalchemy.String(255))
    original_resource_id = sqlalchemy.Column(sqlalchemy.String(255))
Example #12
class ResourceHistory(ResourceMixin, Base, GnocchiBase):
    __tablename__ = 'resource_history'

    revision = sqlalchemy.Column(sqlalchemy.Integer, autoincrement=True,
                                 primary_key=True)
    id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
                           sqlalchemy.ForeignKey(
                               'resource.id',
                               ondelete="CASCADE",
                               name="fk_rh_id_resource_id"),
                           nullable=False)
    revision_end = sqlalchemy.Column(types.TimestampUTC, nullable=False,
                                     default=lambda: utils.utcnow())
    metrics = sqlalchemy.orm.relationship(
        Metric, primaryjoin="Metric.resource_id == ResourceHistory.id",
        foreign_keys='Metric.resource_id')
Example #13
def upgrade():

    op.alter_column('resource_type',
                    'state',
                    type_=state_enum,
                    nullable=False,
                    server_default=None)

    # NOTE(sileht): postgresql has a builtin ENUM type, so
    # just altering the column won't work.
    # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type
    # Does it break offline migration because we use get_bind()?

    # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction,
    # we split the 'ALTER TYPE' operation into several steps.
    bind = op.get_bind()
    if bind and bind.engine.name == "postgresql":
        op.execute("ALTER TYPE resource_type_state_enum RENAME TO \
                    old_resource_type_state_enum")
        op.execute("CREATE TYPE resource_type_state_enum AS ENUM \
                       ('active', 'creating', 'creation_error', \
                        'deleting', 'deletion_error', 'updating', \
                        'updating_error')")
        op.execute("ALTER TABLE resource_type ALTER COLUMN state TYPE \
                   resource_type_state_enum USING \
                   state::text::resource_type_state_enum")
        op.execute("DROP TYPE old_resource_type_state_enum")

    # NOTE(sileht): we can't alter type with server_default set on
    # postgresql...
    op.alter_column('resource_type',
                    'state',
                    type_=state_enum,
                    nullable=False,
                    server_default="creating")
    op.add_column(
        "resource_type",
        sa.Column("updated_at",
                  sqlalchemy_base.PreciseTimestamp(),
                  nullable=True))

    op.execute(resource_type.update().values({'updated_at': utils.utcnow()}))
    op.alter_column("resource_type",
                    "updated_at",
                    type_=sqlalchemy_base.PreciseTimestamp(),
                    nullable=False)
Example #14
def upgrade():

    op.alter_column('resource_type', 'state',
                    type_=state_enum,
                    nullable=False,
                    server_default=None)

    # NOTE(sileht): postgresql has a builtin ENUM type, so
    # just altering the column won't work.
    # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type
    # Does it break offline migration because we use get_bind()?

    # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction,
    # we split the 'ALTER TYPE' operation into several steps.
    bind = op.get_bind()
    if bind and bind.engine.name == "postgresql":
        op.execute("ALTER TYPE resource_type_state_enum RENAME TO \
                    old_resource_type_state_enum")
        op.execute("CREATE TYPE resource_type_state_enum AS ENUM \
                       ('active', 'creating', 'creation_error', \
                        'deleting', 'deletion_error', 'updating', \
                        'updating_error')")
        op.execute("ALTER TABLE resource_type ALTER COLUMN state TYPE \
                   resource_type_state_enum USING \
                   state::text::resource_type_state_enum")
        op.execute("DROP TYPE old_resource_type_state_enum")

    # NOTE(sileht): we can't alter type with server_default set on
    # postgresql...
    op.alter_column('resource_type', 'state',
                    type_=state_enum,
                    nullable=False,
                    server_default="creating")
    op.add_column("resource_type",
                  sa.Column("updated_at",
                            sqlalchemy_types.PreciseTimestamp(),
                            nullable=True))

    op.execute(resource_type.update().values({'updated_at': utils.utcnow()}))
    op.alter_column("resource_type", "updated_at",
                    type_=sqlalchemy_types.PreciseTimestamp(),
                    nullable=False)
Example #15
    def update_resource(self, resource_type,
                        resource_id, ended_at=_marker, metrics=_marker,
                        append_metrics=False,
                        **kwargs):

        now = utils.utcnow()

        resource_cls = self._resource_type_to_class(resource_type)
        resource_history_cls = self._resource_type_to_class(resource_type,
                                                            "history")
        session = self.engine_facade.get_session()
        try:
            with session.begin():
                # NOTE(sileht): We use FOR UPDATE, which is not Galera
                # friendly, but there is no other way to cleanly patch a
                # resource and store its history safely when two concurrent
                # calls are made.
                q = session.query(resource_cls).filter(
                    resource_cls.id == resource_id).with_for_update()

                r = q.first()
                if r is None:
                    raise indexer.NoSuchResource(resource_id)

                # Build history
                rh = resource_history_cls()
                for col in sqlalchemy.inspect(resource_cls).columns:
                    setattr(rh, col.name, getattr(r, col.name))
                rh.revision_end = now
                session.add(rh)

                # Update the resource
                if ended_at is not _marker:
                    # NOTE(jd) MySQL does not honor checks. I hate it.
                    engine = self.engine_facade.get_engine()
                    if engine.dialect.name == "mysql":
                        if r.started_at is not None and ended_at is not None:
                            if r.started_at > ended_at:
                                raise indexer.ResourceValueError(
                                    resource_type, "ended_at", ended_at)
                    r.ended_at = ended_at

                r.revision_start = now

                if kwargs:
                    for attribute, value in six.iteritems(kwargs):
                        if hasattr(r, attribute):
                            setattr(r, attribute, value)
                        else:
                            raise indexer.ResourceAttributeError(
                                r.type, attribute)

                if metrics is not _marker:
                    if not append_metrics:
                        session.query(Metric).filter(
                            Metric.resource_id == resource_id,
                            Metric.status == 'active').update(
                                {"resource_id": None})
                    self._set_metrics_for_resource(session, r, metrics)
        except exception.DBConstraintError as e:
            if e.check_name == "ck_started_before_ended":
                raise indexer.ResourceValueError(
                    resource_type, "ended_at", ended_at)
            raise

        # NOTE(jd) Force load of metrics – do it outside the session!
        r.metrics

        session.expunge_all()
        return r
Example #16
    def update_resource(self,
                        resource_type,
                        resource_id,
                        ended_at=_marker,
                        metrics=_marker,
                        append_metrics=False,
                        create_revision=True,
                        **kwargs):
        resource_cls = self._resource_type_to_class(resource_type)
        resource_history_cls = self._resource_type_to_class(
            resource_type, "history")
        with self.facade.writer() as session:
            try:
                # NOTE(sileht): We use FOR UPDATE, which is not Galera
                # friendly, but there is no other way to cleanly patch a
                # resource and store its history safely when two concurrent
                # calls are made.
                q = session.query(resource_cls).filter(
                    resource_cls.id == resource_id).with_for_update()

                r = q.first()
                if r is None:
                    raise indexer.NoSuchResource(resource_id)

                if create_revision:
                    # Build history
                    rh = resource_history_cls()
                    for col in sqlalchemy.inspect(resource_cls).columns:
                        setattr(rh, col.name, getattr(r, col.name))
                    now = utils.utcnow()
                    rh.revision_end = now
                    session.add(rh)
                    r.revision_start = now

                # Update the resource
                if ended_at is not _marker:
                    # NOTE(jd) MySQL does not honor checks. I hate it.
                    engine = session.connection()
                    if engine.dialect.name == "mysql":
                        if r.started_at is not None and ended_at is not None:
                            if r.started_at > ended_at:
                                raise indexer.ResourceValueError(
                                    resource_type, "ended_at", ended_at)
                    r.ended_at = ended_at

                if kwargs:
                    for attribute, value in six.iteritems(kwargs):
                        if hasattr(r, attribute):
                            setattr(r, attribute, value)
                        else:
                            raise indexer.ResourceAttributeError(
                                r.type, attribute)

                if metrics is not _marker:
                    if not append_metrics:
                        session.query(Metric).filter(
                            Metric.resource_id == resource_id,
                            Metric.status == 'active').update(
                                {"resource_id": None})
                    self._set_metrics_for_resource(session, r, metrics)

                session.flush()
            except exception.DBConstraintError as e:
                if e.check_name == "ck_started_before_ended":
                    raise indexer.ResourceValueError(resource_type, "ended_at",
                                                     ended_at)
                raise

            # NOTE(jd) Force load of metrics – do it outside the session!
            r.metrics

            return r
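Both update_resource() variants default ended_at and metrics to a module-level _marker object instead of None, so the method can distinguish "argument not passed" from "caller explicitly passed None" (where None is a meaningful value that clears the end date). A minimal sketch of that sentinel pattern, with illustrative names:

_marker = object()  # unique sentinel no caller can produce by accident


def update(ended_at=_marker):
    if ended_at is _marker:
        return "ended_at untouched"
    # None is meaningful here: it clears the end date.
    return "ended_at set to %r" % (ended_at,)


print(update())              # ended_at untouched
print(update(None))          # ended_at set to None
print(update("2024-01-01"))  # ended_at set to '2024-01-01'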