Example #1
def update_values_models(keys=None):
    """[Update predictions of all models]

    Raises:
        Exception: [description]
    """
    logger.info("Updating [Value] models")
    items = db_values.items()
    if keys is not None:
        items = filter(lambda item: item[0] in keys, items)
    for (h, record) in items:
        # find all models with hash(labels) same as valuesKey
        models = list(
            filter(lambda model: ts_hash(all_labels=model[1]["labels"]) == h,
                   db_models.items()))
        # pick the most recent model
        models.sort(key=lambda model: model[1].get("timestamp",
                                                   datetime.fromtimestamp(0)),
                    reverse=True)
        if len(models) == 0:
            raise Exception("There must be at least one predictor")
        predictor = models[0][1]["predictor"]
        if record["model"] != models[0][0]:
            logger.debug("Updating [Value:{h}] model to [Model:{mid}]".format(
                h=h, mid=models[0][0]))
            predictor.build_prediction_df()
            db_values[h]["model"] = models[0][0]
Example #2
def update_model_predictions(keys=None):
    """Rebuild the prediction dataframe of every model, or only of the given keys."""
    logger.info("Update predictions")
    items = db_models.items()
    if keys is not None:
        items = filter(lambda item: item[0] in keys, items)
    for (h, model_record) in items:
        predictor = model_record["predictor"]
        ts_h = ts_hash(all_labels=model_record["labels"])
        logger.debug(
            "Update prediction in [Model:{mid}], [Hash:{h}], labels:{labels}".
            format(mid=h, h=ts_h, labels=model_record["labels"]))
        predictor.build_prediction_df()
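A hypothetical usage, assuming model keys like those stored in db_models (the key values shown are made up):

# Rebuild predictions for two specific models only (keys are illustrative).
update_model_predictions(keys={"prophet-abc123", "prophet-def456"})

# Rebuild predictions for every model in db_models.
update_model_predictions()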
Example #3
def load_model(key, r, hash_include=None):
    pipe = r.pipeline()
    pipe.get("manifest:{key}".format(key=key))
    res = pipe.execute()
    x_list = list(map(json.loads, res))
    # x_list.sort(key=lambda manifest: datetime.fromtimestamp(manifest.get("timestamp", 0)))
    manifest = x_list[0]
    # v =  version.parse(manifest.get("version", "0.0.0"))
    label_hash = ts_hash(metric_name=manifest["metric"]["metric_name"],
                         label_config=manifest["metric"]["label_config"])
    if hash_include is not None and label_hash not in hash_include:
        logger.debug(
            "Skip loading model {h} ({metric_name}), label hash:{lh}".format(
                h=manifest["name"],
                metric_name=manifest["metric"]["metric_name"],
                lh=label_hash))
        return manifest, None
    h = manifest["name"]
    fsize = manifest["size"]
    cls_name = manifest["class"]
    md5 = manifest["md5"]
    cls = None

    data = r.get('model:{key}'.format(key=h))

    if hashlib.md5(data).hexdigest() != md5:
        raise Exception("checksum does not match")

    if cls_name == "prophet":
        cls = model_prophet.MetricPredictor
    else:
        raise NotImplementedError("Model class cannot be mapped to serializer")

    model = loads_model(cls, data)
    if model is None:
        raise Exception("Failed to deserialize model {h}".format(h=h))

    logger.debug(
        "Loaded model {h} ({metric_name}), label hash:{lh}, metric:{metric}".
        format(h=manifest["name"],
               metric_name=manifest["metric"]["metric_name"],
               lh=label_hash,
               metric=manifest["metric"]))

    return manifest, model
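The loads_model helper used above is not included in this listing. A minimal sketch, assuming the payload stored under model:<key> is a pickled instance of the class named in the manifest (the real serialization format may differ):

import pickle


def loads_model(cls, data):
    # Hypothetical deserializer: unpickle the payload and ensure it is an
    # instance of the expected predictor class; return None otherwise.
    try:
        obj = pickle.loads(data)
    except pickle.UnpicklingError:
        return None
    return obj if isinstance(obj, cls) else None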
Example #4
def update_tss():
    """Updates db_ts Store. Discover new TS and remove obsolete TS. 
    TS data are not stored in this record. Values record is used to store TS data.
    
    index (hash):
    {
        "labels" (dict): 
        "generation" (int):
    }
    metric (dict): Single record of a list returned by get_metric_range_data()
    generation (int): last update cycle when metric existed
    Raises:
        e: [description]
    """
    logger.info("Updating TS")
    now = datetime.now()
    generation = next(ts_generation)
    try:
        for metric in Configuration.metrics_list:
            current_start_time = now - Configuration.current_data_window_size
            metric_init = pc.get_metric_range_data(
                metric_name=metric,
                start_time=current_start_time,
                end_time=now)

            hash_metric_list = list(
                map(
                    lambda metric: (ts_hash(all_labels=metric["metric"]), {
                        "labels": metric["metric"],
                        "generation": generation
                    }), metric_init))
            logger.info("new TS: {tss}".format(tss=dict(hash_metric_list)))
            db_ts.update(hash_metric_list)
            logger.info("TS stats: {tss}".format(tss=db_ts))
    except Exception:
        # Propagate query/collection failures to the caller.
        raise
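ts_generation here and values_generation in Example #5 act as monotonically increasing update-cycle counters. A minimal sketch, assuming plain itertools.count iterators defined at module level:

import itertools

# Each next() call yields the id of the current update cycle; records whose
# "generation" falls behind can later be treated as obsolete.
ts_generation = itertools.count(start=1)
values_generation = itertools.count(start=1)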
Example #5
def update_values(models_include=None):
    """Update db_values for every TS.
    If Values record exists then updates its metric. If Values record does not exist then its created
    When Values record is created its predictor Model selected. Value record is associated with its TS.
    
    index (hash):
    {
        "metric" (Metric): first item of return value of MetricsList(get_metric_range_data())
        "ts" (tsKey): key of db_ts
        "model" (modelKey): key of db_models
    }

    Raises:
        Exception: [description]
        Exception: [description]
        Exception: [description]
        e: [description]
    """
    logger.info("Updating Values")
    now = datetime.now()
    generation = next(values_generation)
    for (h, ts) in db_ts.items():
        logger.debug("Updating [TS:{h}], labels:{labels}".format(
            h=h, labels=ts["labels"]))
        if h in db_values.keys():
            # TS is already tracked by a Values record in db_values
            current_start_time = now - Configuration.current_data_window_size
            record = db_values[h]
            metric = record["metric"]
            metric_data = pc.get_metric_range_data(
                metric_name=metric.metric_name,
                label_config=metric.label_config,
                start_time=current_start_time,
                end_time=now)
            metrics = MetricsList(metric_data)
            if len(metrics) != 1:
                raise Exception("There can be only one")
            new_metric = metrics[0] + metric

            # Truncate the merged series to the current data window.
            # Note: this call has been observed to raise quickly in some cases.
            trunk_metric = Metric(new_metric, current_start_time)
            db_values[h]["metric"] = trunk_metric
            db_values[h]["generation"] = generation
            logger.debug(
                "Update and truncate [Metric:{h}] horizon:{current_start_time} metric_name:{metric_name}, label_config:{label_config}"
                .format(h=h,
                        metric_name=metric.metric_name,
                        label_config=metric.label_config,
                        current_start_time=current_start_time))
        else:
            current_start_time = now - Configuration.current_data_window_size
            metric_name = ts["labels"]["__name__"]
            labels = dict()
            labels.update(ts["labels"])
            del labels["__name__"]

            items = db_models.items()
            if models_include is not None:
                items = filter(lambda item: item[0] in models_include, items)

            models = list(
                filter(
                    lambda model: ts_hash(all_labels=model[1]["labels"]) == h,
                    items))
            if len(models) == 0:
                logger.warning(
                    "No models matching labels for [Metric:{h}] metric_name:{metric_name}, label_config:{label_config}"
                    .format(h=h, metric_name=metric_name, label_config=labels))
                continue

            metric_data = pc.get_metric_range_data(
                metric_name=metric_name,
                label_config=labels,
                start_time=current_start_time,
                end_time=now)
            metrics = MetricsList(metric_data)
            if len(metrics) != 1:
                raise Exception("There can be only one")

            # pick the most recent model and store its key on the record
            models.sort(key=lambda model: model[1].get(
                "timestamp", datetime.fromtimestamp(0)),
                        reverse=True)
            model_key = models[0][0]
            # models[0][1]["predictor"].build_prediction_df()
            record = {
                "metric": metrics[0],
                "ts": h,
                "model": model_key,
                "generation": generation
            }
            db_values.update({h: record})
            logger.debug(
                "Add [Metric:{h}] horizon:{current_start_time} metric_name:{metric_name}, label_config:{label_config}"
                .format(h=h,
                        metric_name=metric_name,
                        label_config=labels,
                        current_start_time=current_start_time))
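For reference, the in-memory stores used throughout these examples behave like plain dicts keyed by the hashes above. The layout below is inferred from how the records are read and written in the listings, not taken from the original module:

# Assumed record shapes (inferred from usage in the examples above):
db_ts = {}      # label hash -> {"labels": dict, "generation": int}
db_models = {}  # model key  -> {"labels": dict, "predictor": object,
                #                "timestamp": datetime}
db_values = {}  # label hash -> {"metric": Metric, "ts": label hash,
                #                "model": model key, "generation": int}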