Example #1
    def activate(self, labels=()):
        """Instantiate specific Prometheus metric objects."""
        dbmind_assert(not self.is_label and self.prefix)

        self.value = PROMETHEUS_TYPES[self.usage](
            # Prefix the query instance name to the specific metric name.
            '%s_%s' % (self.prefix, self.name),
            self.desc,
            labels)
        return self.value
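
A minimal sketch of what activate() builds, assuming PROMETHEUS_TYPES maps a usage string such as 'GAUGE' to the corresponding prometheus_client class; the prefix, metric name, and label below are made up for illustration.

from prometheus_client import Gauge

# Hypothetical values: in the real code they come from the Metric object
# and its parent QueryInstance.
prefix, name, desc = 'pg_database', 'size_bytes', 'Database size in bytes.'
labels = ('datname',)

# Roughly equivalent to PROMETHEUS_TYPES['GAUGE'](...) inside activate().
value = Gauge('%s_%s' % (prefix, name), desc, labels)
value.labels(datname='postgres').set(8192.0)
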
Example #2
def quickly_forecast(sequence,
                     forecasting_minutes,
                     lower=0,
                     upper=float('inf')):
    """
    Return forecast sequence in forecasting_minutes from raw sequence.
    :param sequence: type->Sequence
    :param forecasting_minutes: type->int or float
    :param lower: The lower limit of the forecast result
    :param upper: The upper limit of the forecast result.
    :return: forecast sequence: type->Sequence
    """
    if len(sequence) <= 1:
        return Sequence()

    # 1. check for forecasting minutes
    _check_forecasting_minutes(forecasting_minutes)
    forecasting_length = int(forecasting_minutes * 60 * 1000 / sequence.step)
    if forecasting_length == 0 or forecasting_minutes == 0:
        return Sequence()

    # 2. interpolate
    interpolated_sequence = sequence_interpolate(sequence)

    # 3. decompose sequence
    seasonal_data, train_sequence = decompose_sequence(interpolated_sequence)

    # 4. get model from ForecastingFactory
    model = ForecastingFactory.get_instance(train_sequence)

    # 5. fit and forecast
    model.fit(train_sequence)

    forecast_data = model.forecast(forecasting_length)
    forecast_data = trim_head_and_tail_nan(forecast_data)
    dbmind_assert(len(forecast_data) == forecasting_length)

    # 6. compose sequence
    forecast_timestamps, forecast_values = compose_sequence(
        seasonal_data, train_sequence, forecast_data)

    # 7. clamp the forecast values into the [lower, upper] range
    for i in range(len(forecast_values)):
        forecast_values[i] = min(forecast_values[i], upper)
        forecast_values[i] = max(forecast_values[i], lower)

    return Sequence(timestamps=forecast_timestamps,
                    values=forecast_values,
                    name=sequence.name,
                    labels=sequence.labels)
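
A hedged usage sketch for quickly_forecast(); the metric name and sampling interval are invented, and Sequence is the class used in the function above.

import math

# One hour of synthetic CPU usage sampled every 15 seconds
# (timestamps are in milliseconds, matching sequence.step).
timestamps = list(range(0, 60 * 60 * 1000, 15 * 1000))
values = [0.5 + 0.3 * math.sin(2 * math.pi * i / 240) for i in range(len(timestamps))]
history = Sequence(timestamps=timestamps, values=values, name='os_cpu_usage')

# Forecast the next 30 minutes and clamp the result to a valid usage range.
future = quickly_forecast(history, forecasting_minutes=30, lower=0, upper=1)
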
Example #3
def register_metrics(parsed_yml, force_connection_db=None):
    """Some metrics need to be queried on the specific database
    (e.g., tables or views under the dbe_perf schema need
    to query on the `postgres` database).
    Therefore, we cannot specify that all metrics are collected
    through the default database,
    and this is the purpose of the parameter `force_connection_db`.
    """
    dbmind_assert(isinstance(parsed_yml, dict))

    for name, raw_query_instance in parsed_yml.items():
        dbmind_assert(isinstance(raw_query_instance, dict))

        raw_query_instance.setdefault('name', name)
        instance = QueryInstance(raw_query_instance)
        instance.force_query_into_another_db(force_connection_db)
        instance.register(_registry)
        query_instances.append(instance)
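
A hedged usage sketch: load an exporter configuration with PyYAML and register its metrics so that they are queried on the `postgres` database; the file path is hypothetical.

import yaml

# Load an exporter configuration file (the path is made up) and register
# its query instances against the `postgres` database.
with open('yamls/dbe_perf.yml') as f:
    parsed_yml = yaml.safe_load(f)

register_metrics(parsed_yml, force_connection_db='postgres')
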
Example #4
def compose_sequence(seasonal_data, train_sequence, forecast_values):
    """Recombine the forecast trend with the seasonal and residual
    components (if any) and generate the matching forecast timestamps."""
    forecast_length = len(forecast_values)
    if seasonal_data and seasonal_data.is_seasonal:
        start_index = len(train_sequence) % seasonal_data.period
        seasonal = seasonal_data.seasonal
        resid = seasonal_data.resid
        dbmind_assert(len(seasonal) == len(resid))

        if len(seasonal) - start_index < forecast_length:
            # pad it.
            padding_length = forecast_length - (len(seasonal) - start_index)
            seasonal = np.pad(seasonal, (0, padding_length), mode='wrap')
            resid = np.pad(resid, (0, padding_length), mode='wrap')
        seasonal = seasonal[start_index:start_index + forecast_length]
        resid = resid[start_index:start_index + forecast_length]
        forecast_values = seasonal + forecast_values + resid

    forecast_timestamps = [
        train_sequence.timestamps[-1] + train_sequence.step * i
        for i in range(1, forecast_length + 1)
    ]
    return forecast_timestamps, forecast_values
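
A toy illustration of the wrap-padding step above: when the remaining seasonal component is shorter than the forecast horizon, np.pad(..., mode='wrap') repeats it cyclically so every forecast point receives a seasonal term. The numbers are invented.

import numpy as np

seasonal = np.array([1.0, 2.0, 3.0, 4.0])   # one seasonal period
start_index, forecast_length = 3, 6
padding_length = forecast_length - (len(seasonal) - start_index)   # 5
padded = np.pad(seasonal, (0, padding_length), mode='wrap')
print(padded[start_index:start_index + forecast_length])
# -> [4. 1. 2. 3. 4. 1.]
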
Example #5
    def __init__(self, d):
        self.name = d['name']
        self.desc = d.get('desc', '')
        self.queries = list()
        self.metrics = list()
        self.labels = list()
        self.status = d.get('status', 'enable') == 'enable'
        self.ttl = d.get('ttl', 0)
        self.timeout = d.get('timeout', 0)
        self.public = d.get('public', True)

        # Compatible with PG-exporter format,
        # convert the query field into a list.
        if isinstance(d['query'], str):
            d['query'] = [{
                'name': self.name,
                'sql': d['query'],
                'ttl': self.ttl,
                'timeout': self.timeout
            }]

        dbmind_assert(isinstance(d['query'], list))
        for q in d['query']:
            # Compatible with PG-exporter
            query = Query(q)
            # TODO: check whether the query is invalid.
            if query.status and query.dbrole == _dbrole and is_valid_version(
                    query.version):
                self.queries.append(query)
            else:
                logging.info(
                    'Skip the query %s (status: %s, dbRole: %s, version: %s).'
                    % (query.name, query.status, query.dbrole, query.version))

        for m in d['metrics']:
            # Compatible with PG-exporter: a metric may be given as a
            # one-key mapping, i.e., {'metric_name': {'usage': ..., 'description': ...}}.
            if len(m) == len(
                {'metric_name': {
                    'usage': '?',
                    'description': '?'
                }}):
                # Convert to the openGauss-exporter format.
                # The following is a demo for metric structure in the openGauss-exporter:
                # {'name': 'metric_name', 'usage': '?', 'description': '?'}
                name, value = next(iter(m.items()))
                m = {'name': name}
                m.update(value)

            # Parse the dict structure into a Metric object so that we can
            # use its fields directly.
            metric = Metric(m)
            if not metric.is_valid:
                continue
            if not metric.is_label:
                metric.prefix = self.name
                self.metrics.append(metric)
            else:
                self.labels.append(metric.name)

        # `global_labels` is required and must always be added.
        self.labels.extend(global_labels.keys())
        self._forcing_db = None
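
A hedged sketch of the input this constructor accepts, written in the PG-exporter style it converts from: `query` is a plain SQL string and each metric is a one-key mapping, so both compatibility branches above are exercised. The metric names and SQL are invented; Query and Metric are the classes referenced above.

d = {
    'name': 'pg_database',
    'desc': 'Per-database statistics.',
    'query': 'SELECT datname, pg_database_size(datname) AS size_bytes FROM pg_database;',
    'metrics': [
        {'datname': {'usage': 'LABEL', 'description': 'database name'}},
        {'size_bytes': {'usage': 'GAUGE', 'description': 'database size in bytes'}},
    ],
}
instance = QueryInstance(d)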