Code example #1
def _get_metrics(
    metrics: Dict[str, Dict[str, Any]], extra_labels: FrozenSet[str]
) -> Dict[str, MetricConfig]:
    """Return a dict mapping metric names to their configuration."""
    configs = {}
    # global metrics
    for metric_config in (
        _DB_ERRORS_METRIC_CONFIG,
        _QUERIES_METRIC_CONFIG,
        _QUERY_LATENCY_METRIC_CONFIG,
    ):
        # make a copy since labels are not immutable
        metric_config = deepcopy(metric_config)
        metric_config.config["labels"].extend(extra_labels)
        metric_config.config["labels"].sort()
        configs[metric_config.name] = metric_config
    # other metrics
    for name, config in metrics.items():
        _validate_metric_config(name, config, extra_labels)
        metric_type = config.pop("type")
        config.setdefault("labels", []).extend(extra_labels)
        config["labels"].sort()
        description = config.pop("description", "")
        configs[name] = MetricConfig(name, description, metric_type, config)
    return configs
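
A minimal call sketch for the variant above, assuming the module-level constants and the _validate_metric_config helper it references are in scope (they appear in the later excerpts), and that MetricConfig exposes the name and config attributes used above. The input metric definition is hypothetical.

# Hypothetical input: one user-defined gauge, with "database" as the extra label.
metrics = {
    "connected_sessions": {
        "type": "gauge",
        "description": "Number of connected sessions",
    },
}
configs = _get_metrics(metrics, frozenset(["database"]))
# The result also contains the global metrics defined at module level.
print(sorted(configs))
# The extra label is merged into the metric's (initially empty) label list.
print(configs["connected_sessions"].config["labels"])  # ["database"]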
Code example #2
def _get_metrics(metrics: Dict[str, Dict[str, Any]]) -> List[MetricConfig]:
    """Return metrics configuration."""
    configs = []
    for name, config in metrics.items():
        metric_type = config.pop('type', '')
        if metric_type not in SUPPORTED_METRICS:
            raise ConfigError(f"Unsupported metric type: '{metric_type}'")
        description = config.pop('description', '')
        # add a 'database' label to have different series for sharded databases
        config['labels'] = ['database']
        configs.append(MetricConfig(name, description, metric_type, config))
    return configs
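
A similar sketch for this list-returning variant, again with a hypothetical input and assuming SUPPORTED_METRICS includes "counter". Note that this version replaces any user-supplied labels with the single "database" label rather than merging them.

metrics = {"requests": {"type": "counter", "description": "Served requests"}}
[config] = _get_metrics(metrics)
print(config.name)              # "requests"
print(config.config["labels"])  # ["database"]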
Code example #3
def _get_metrics(metrics: Dict[str, Dict[str, Any]]) -> List[MetricConfig]:
    """Return metrics configuration."""
    # add global metrics
    configs = [DB_ERRORS_METRIC, QUERIES_METRIC]
    for name, config in metrics.items():
        try:
            _validate_metric_config(name, config)
            metric_type = config.pop("type")
            if metric_type not in SUPPORTED_METRICS:
                raise ConfigError(f'Unsupported metric type: "{metric_type}"')
            config.setdefault("labels", []).extend(AUTOMATIC_LABELS)
            config["labels"].sort()
            description = config.pop("description", "")
            configs.append(MetricConfig(name, description, metric_type, config))
        except KeyError as e:
            _raise_missing_key(e, "metric", name)
    return configs
Code example #4
from prometheus_aioexporter import MetricConfig
import yaml

from . import PACKAGE
from .db import (
    DataBase,
    DATABASE_LABEL,
    InvalidQueryParameters,
    InvalidQuerySchedule,
    Query,
    QueryMetric,
)

# metric for counting database errors
DB_ERRORS_METRIC_NAME = "database_errors"
_DB_ERRORS_METRIC_CONFIG = MetricConfig(
    DB_ERRORS_METRIC_NAME, "Number of database errors", "counter", {"labels": []},
)
# metric for counting performed queries
QUERIES_METRIC_NAME = "queries"
_QUERIES_METRIC_CONFIG = MetricConfig(
    QUERIES_METRIC_NAME,
    "Number of database queries",
    "counter",
    {"labels": ["status"]},
)
# metric for tracking query execution latency
QUERY_LATENCY_METRIC_NAME = "query_latency"
_QUERY_LATENCY_METRIC_CONFIG = MetricConfig(
    QUERY_LATENCY_METRIC_NAME,
    "Query execution latency",
    "histogram",
Code example #5
import re

from prometheus_aioexporter import MetricConfig
import yaml

from .db import (
    AUTOMATIC_LABELS,
    DataBase,
    DATABASE_LABEL,
    Query,
    QueryMetric,
    validate_dsn,
)

# metric for counting database errors
DB_ERRORS_METRIC_NAME = "database_errors"
DB_ERRORS_METRIC = MetricConfig(
    DB_ERRORS_METRIC_NAME,
    "Number of database errors",
    "counter",
    {"labels": [DATABASE_LABEL]},
)
# metric for counting performed queries
QUERIES_METRIC_NAME = "queries"
QUERIES_METRIC = MetricConfig(
    QUERIES_METRIC_NAME,
    "Number of database queries",
    "counter",
    {"labels": [DATABASE_LABEL, "status"]},
)

# regexp for validating metric and label names
_NAME_RE = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")

Code example #6
import re

from prometheus_aioexporter import MetricConfig
import yaml

from .db import (
    AUTOMATIC_LABELS,
    DataBase,
    DATABASE_LABEL,
    Query,
    QueryMetric,
    validate_dsn,
)

# metric for counting database errors
DB_ERRORS_METRIC_NAME = 'database_errors'
DB_ERRORS_METRIC = MetricConfig(DB_ERRORS_METRIC_NAME,
                                'Number of database errors', 'counter',
                                {'labels': [DATABASE_LABEL]})
# metric for counting performed queries
QUERIES_METRIC_NAME = 'queries'
QUERIES_METRIC = MetricConfig(QUERIES_METRIC_NAME,
                              'Number of database queries', 'counter',
                              {'labels': [DATABASE_LABEL, 'status']})

# regexp for validating metric and label names
_NAME_RE = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*$')


class ConfigError(Exception):
    """Configuration is invalid."""

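The _validate_metric_config helper referenced in examples #1 and #3 is not shown in these excerpts. The following is a purely illustrative sketch of the kind of name checks it might perform with _NAME_RE and ConfigError, not the project's actual implementation.

def _validate_metric_config(name, config, extra_labels=frozenset()):
    """Illustrative sketch: reject invalid metric and label names."""
    if not _NAME_RE.match(name):
        raise ConfigError(f"Invalid metric name: '{name}'")
    for label in config.get("labels", []):
        if not _NAME_RE.match(label):
            raise ConfigError(f"Invalid label name: '{label}'")
        if label in extra_labels:
            raise ConfigError(f"Label '{label}' is reserved for automatic labels")
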
Code example #7
import re

from prometheus_aioexporter import MetricConfig
import yaml

from . import PACKAGE
from .db import (
    DataBase,
    DATABASE_LABEL,
    InvalidQueryParameters,
    Query,
    QueryMetric,
)

# metric for counting database errors
DB_ERRORS_METRIC_NAME = "database_errors"
_DB_ERRORS_METRIC_CONFIG = MetricConfig(
    DB_ERRORS_METRIC_NAME,
    "Number of database errors",
    "counter",
    {"labels": []},
)
# metric for counting performed queries
QUERIES_METRIC_NAME = "queries"
_QUERIES_METRIC_CONFIG = MetricConfig(
    QUERIES_METRIC_NAME,
    "Number of database queries",
    "counter",
    {"labels": ["status"]},
)
GLOBAL_METRICS = frozenset([DB_ERRORS_METRIC_NAME, QUERIES_METRIC_NAME])

# regexp for validating environment variable names
_ENV_VAR_RE = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
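
_ENV_VAR_RE suggests that parts of the configuration (for instance database DSNs) can be taken from environment variables, but the excerpts do not show how. The resolver below is a hypothetical illustration only, assuming an "env:VARNAME" reference syntax that is not confirmed by these excerpts.

import os

def _resolve_env_reference(value: str) -> str:
    """Illustrative sketch: resolve an "env:VARNAME" reference to its value."""
    if not value.startswith("env:"):
        return value
    varname = value[len("env:"):]
    if not _ENV_VAR_RE.match(varname):
        raise ConfigError(f"Invalid environment variable name: '{varname}'")
    if varname not in os.environ:
        raise ConfigError(f"Undefined environment variable: '{varname}'")
    return os.environ[varname]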