import logging

from prometheus_api_client import PrometheusConnect
from prometheus_client import Gauge

# project-local modules: configuration.py holds the app settings,
# model.py the MetricPredictor implementation
from configuration import Configuration
import model

_LOGGER = logging.getLogger(__name__)

METRICS_LIST = Configuration.metrics_list


PREDICTOR_MODEL_LIST = []

pc = PrometheusConnect(
    url=Configuration.prometheus_url, headers=Configuration.prom_connect_headers, disable_ssl=True
)
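# get_current_metric_value returns one entry per unique label combination,
# so a single metric name can fan out into several predictors below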
for metric in METRICS_LIST:
    # Initialize a predictor for all metrics first
    metric_init = pc.get_current_metric_value(metric_name=metric)
    for unique_metric in metric_init:
        PREDICTOR_MODEL_LIST.append(
            model.MetricPredictor(unique_metric, Configuration.rolling_data_window_size)
        )

# A gauge set for the predicted values
GAUGE_DICT = dict()
for predictor in PREDICTOR_MODEL_LIST:
    unique_metric = predictor.metric
    label_list = list(unique_metric.label_config.keys())
    label_list.append("value_type")
    if unique_metric.metric_name not in GAUGE_DICT:
        GAUGE_DICT[unique_metric.metric_name] = Gauge(
            unique_metric.metric_name + "_" + predictor.model_name,
            predictor.model_description,
            label_list,
        )
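The gauges registered above only define the metric families; elsewhere in the service each predictor's output would be written into them. A minimal sketch of that update step, assuming each predictor exposes a predict_value() method returning a single float (a hypothetical name here; the Gauge.labels(...).set(...) call is standard prometheus_client API, and value_type would carry labels such as "yhat"):

from datetime import datetime

for predictor in PREDICTOR_MODEL_LIST:
    unique_metric = predictor.metric
    # predict_value() is a hypothetical method name, assumed to return a float
    prediction = predictor.predict_value(datetime.now())
    GAUGE_DICT[unique_metric.metric_name].labels(
        **unique_metric.label_config, value_type="yhat"
    ).set(prediction)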

Example #2

pc = PrometheusConnect(
    url=Configuration.prometheus_url,
    headers=Configuration.prom_connect_headers,
    disable_ssl=True,
)

for metric in METRICS_LIST:
    # Initialize a predictor for all metrics first
    metric_init = pc.get_current_metric_value(metric_name=metric)

    for unique_metric in metric_init:
        PREDICTOR_MODEL_LIST.append(
            model.MetricPredictor(
                unique_metric,
                rolling_data_window_size=Configuration.rolling_training_window_size,
            )
        )

# A gauge set for the predicted values
GAUGE_DICT = dict()
for predictor in PREDICTOR_MODEL_LIST:
    unique_metric = predictor.metric
    label_list = list(unique_metric.label_config.keys())
    label_list.append("value_type")
    if unique_metric.metric_name not in GAUGE_DICT:
        GAUGE_DICT[unique_metric.metric_name] = Gauge(
            unique_metric.metric_name + "_" + predictor.model_name,
            predictor.model_description,
            label_list,
        )
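Both initialization snippets stop before any training happens. In the surrounding service, each predictor would be trained on a rolling window fetched from Prometheus and refreshed periodically. A minimal sketch of one refresh pass, reusing the get_metric_range_data() call shown in the examples below; the train() method name and the 60-minute window are assumptions:

from datetime import datetime, timedelta

for predictor in PREDICTOR_MODEL_LIST:
    # fetch the most recent window of raw data for this one time series
    data = pc.get_metric_range_data(
        metric_name=predictor.metric.metric_name,
        label_config=predictor.metric.label_config,
        start_time=datetime.now() - timedelta(minutes=60),  # assumed window size
        end_time=datetime.now(),
    )
    # train() is a hypothetical method name for illustration
    predictor.train(data)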
Example #3
    if len(train_data) > 1:
        _LOGGER.error("The timeseries matched were: ")
        for timeseries in train_data:
            print(timeseries.metric_name, timeseries.label_config)
        _LOGGER.error("One metric should be specific to a single time-series")
        raise ValueError

    # Download test data
    test_data_list = pc.get_metric_range_data(
        metric_name=train_data[0].metric_name,
        label_config=train_data[0].label_config,
        start_time=Configuration.metric_train_data_end_time,
        end_time=Configuration.metric_end_time,
        chunk_size=Configuration.metric_chunk_size,
    )
    _LOGGER.info("Downloaded metric data")

    model_mp = model.MetricPredictor(
        train_data[0],
        rolling_data_window_size=Configuration.rolling_training_window_size)

    mlflow.set_experiment(train_data[0].metric_name)
    mlflow.start_run()
    mlflow_run_id = mlflow.active_run().info.run_id

    # keep track of the model name as an MLflow run tag
    mlflow.set_tag("model", model_mp.model_name)

    # keep track of labels as tags in the MLflow experiment
    for label in train_data[0].label_config:
        mlflow.set_tag(label, train_data[0].label_config[label])

    # store the metric with its labels as a tag so it can be copied into Grafana to view the real metric
    mlflow.set_tag("metric", metric)
        _LOGGER.error("The timeseries matched were: ")
        for timeseries in train_data:
            print(timeseries.metric_name, timeseries.label_config)
        _LOGGER.error("One metric should be specific to a single time-series")
        raise ValueError

    # Download test data
    test_data_list = pc.get_metric_range_data(
        metric_name=metric,
        start_time=rolling_data_window,
        chunk_size=str(Configuration.retraining_interval_minutes) + "m",
    )

    _LOGGER.info("Downloaded metric data")

    model_mp = model.MetricPredictor(train_data[0],
                                     rolling_data_window_size=None)

    mlflow.set_experiment(train_data[0].metric_name)
    mlflow.start_run()
    mlflow_run_id = mlflow.active_run().info.run_id

    # keep track of the model name as an MLflow run tag
    mlflow.set_tag("model", model_mp.model_name)

    # keep track of labels as tags in the MLflow experiment
    for label in model_mp.metric.label_config:
        mlflow.set_tag(label, train_data[0].label_config[label])

    # store the metric with its labels as a tag so it can be copied into Grafana to view the real metric
    mlflow.set_tag("metric", metric)