示例#1
0
    def __init__(self, path: Path = Path("/opt/ml")) -> None:
        """Set up SageMaker serving paths and, when running as a batch
        transform job, load the forecast configuration from the environment.
        """
        self.path = ServePaths(path)

        # SAGEMAKER_BATCH is "true" only inside batch-transform containers;
        # real-time endpoints leave batch_config as None.
        in_batch_transform = os.environ.get("SAGEMAKER_BATCH", "false") == "true"
        self.batch_config = (
            ForecastConfig.parse_raw(os.environ["INFERENCE_CONFIG"])
            if in_batch_transform
            else None
        )
def _output_fn(
    forecasts: List[Forecast],
    content_type: str = "application/json",
    config: Union[Config, None] = None,
) -> Union[bytes, Tuple[bytes, str]]:
    """Serialize forecast results according to the response content type.

    Args:
        forecasts (List[Forecast]): List of forecast results.
        content_type (str, optional): MIME type echoed back to the caller
            alongside the payload. Defaults to "application/json".
        config (Config, optional): Serialization config passed to each
            forecast's ``as_json_dict()``. Defaults to a config emitting the
            nine deciles "0.1".."0.9". Built per call rather than as a
            def-time default, so no single Config instance is shared (and
            potentially mutated) across calls.

    Returns:
        Tuple[bytes, str]: UTF-8 encoded JSON-lines payload — one JSON object
        per forecast, newline-separated — and the content type.
    """
    if config is None:
        config = Config(
            quantiles=["0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9"]
        )

    # jsonify_floats is taken from gluonts/shell/serve/util.py
    #
    # The module depends on flask, and we may not want to import when testing in our own dev env.
    def jsonify_floats(json_object):
        """Recursively convert non JSON-spec compliant floats (nan, -inf, inf)
        to their string spellings; everything else is returned unchanged.
        """
        if isinstance(json_object, dict):
            return {k: jsonify_floats(v) for k, v in json_object.items()}
        if isinstance(json_object, list):
            return [jsonify_floats(item) for item in json_object]
        if isinstance(json_object, float):
            if np.isnan(json_object):
                return "NaN"
            if np.isposinf(json_object):
                return "Infinity"
            if np.isneginf(json_object):
                return "-Infinity"
        return json_object

    str_results = "\n".join(
        json.dumps(jsonify_floats(forecast.as_json_dict(config))) for forecast in forecasts
    )
    return str_results.encode(), content_type
示例#3
0
        def invocations() -> Any:
            """Flask handler for a single inference request.

            Expects a JSON payload with ``instances`` (the time series to
            forecast) and ``configuration`` (forecast options). Returns the
            predictions as JSON, or a 500 response carrying the full
            traceback on any failure.
            """
            try:
                payload = flask.request.json
                configuration = payload['configuration']
                # Accept 'num_samples' as an alias for the legacy
                # 'num_eval_samples' field before validating the config.
                if 'num_samples' in configuration:
                    configuration['num_eval_samples'] = configuration[
                        'num_samples']
                config = ForecastConfig.parse_obj(configuration)

                def process(forecast: Forecast) -> dict:
                    """Project one forecast onto the requested output types."""
                    prediction = {}
                    if 'samples' in config.output_types:
                        # Only sample-based forecasts carry raw samples.
                        if isinstance(forecast, SampleForecast):
                            prediction['samples'] = forecast.samples.tolist()
                        else:
                            prediction['samples'] = []
                    if 'mean' in config.output_types:
                        prediction['mean'] = forecast.mean.tolist()
                    if 'quantiles' in config.output_types:
                        prediction['quantiles'] = {
                            q: forecast.quantile(q).tolist()
                            for q in config.quantiles
                        }
                    return prediction

                dataset = ListDataset(payload['instances'], predictor.freq)

                predictions = [
                    process(forecast)
                    for forecast in predictor.predict(
                        dataset, no_samples=config.num_eval_samples)
                ]
                return flask.jsonify(predictions=predictions)

            except Exception:
                # Report the traceback to the caller for easier debugging;
                # the exception object itself is not needed (was an unused
                # `as error` binding).
                return flask.jsonify(error=traceback.format_exc()), 500
示例#4
0
import json
import os
from typing import Any, Callable, Dict, Optional, Tuple, Union

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from gluonts.evaluation import Evaluator
from gluonts.model.forecast import Config, Forecast
from smallmatter.ds import MontagePager

from .metrics import wmape
from .util import mkdir

# Serialization config: report all nine deciles ("0.1".."0.9") per forecast.
output_configuration = Config(quantiles=[f"0.{d}" for d in range(1, 10)])

# Compact matplotlib styling shared by all evaluation figures.
_PLOT_RC = {
    "legend.fontsize": 8,
    "axes.labelsize": 8,
    "axes.titlesize": 10,
    "xtick.labelsize": 8,
    "ytick.labelsize": 8,
    "legend.borderpad": 0.8,
}
plt.rcParams.update(_PLOT_RC)


class MyEvaluator(Evaluator):
    # NOTE: do not write anything to stdout or stderr, otherwise mix-up with tqdm progress bar.
    def __init__(
        self,
示例#5
0
def evaluate(dataset_name, estimator, horizon):
    """Train an estimator on a custom dataset and evaluate its forecasts.

    Relies on module-level globals: ``freq``, ``context_length``,
    ``n_samples``, ``plot`` and ``save``.

    Args:
        dataset_name: Dataset name understood by ``get_custom_dataset``.
        estimator: Estimator *class*; it is instantiated here with the
            globals above.
        horizon: Prediction length in time steps.

    Returns:
        dict: Aggregate metrics augmented with "dataset", "estimator" and
        "horizon" keys.
    """
    train_ds, test_ds = get_custom_dataset(dataset_name, horizon)
    estimator = estimator(
        prediction_length=horizon,
        freq=freq,
        context_length=context_length,
        # cardinality=len(train_ds)
    )

    print(
        f"evaluating {estimator} on {dataset_name} dataset for {horizon} horizon"
    )

    predictor = estimator.train(train_ds)

    forecast_it, ts_it = make_evaluation_predictions(test_ds,
                                                     predictor=predictor,
                                                     num_samples=n_samples)

    print("Obtaining time series conditioning values ...")
    tss = list(tqdm(ts_it, total=len(test_ds)))
    print("Obtaining time series predictions ...")
    forecasts = list(tqdm(forecast_it, total=len(test_ds)))

    if plot:
        print("Plotting time series predictions ...")
        # NOTE(review): indices 0, 90, ..., 360 assume at least 361 entries
        # in tss/forecasts — confirm against the dataset sizes actually used.
        for i in tqdm(range(0, 361, 90)):
            plot_prob_forecasts(tss[i], forecasts[i], i, horizon,
                                context_length)

    print("Saving time series predictions ...")
    # Forecasts are laid out as `series` blocks of len(train_ds) items each:
    # forecasts[z * len(train_ds) + i] is window z of training series i.
    series = int(len(forecasts) / len(train_ds))
    # Fixed typo: the seed array was previously named `sesies_q`.
    empty_rows = np.empty((0, horizon * series), float)
    q10_, q50_, q90_ = empty_rows, empty_rows, empty_rows
    indexes_ = np.empty((0, horizon * series), 'datetime64[s]')
    for i in range(len(train_ds)):
        q10, q50, q90 = np.array([]), np.array([]), np.array([])
        indexes = np.array([])
        # Concatenate all windows of series i into one row per quantile.
        for z in range(series):
            forecast = forecasts[z * len(train_ds) + i]
            f_dict = forecast.as_json_dict(
                Config(output_types={OutputType.quantiles}))['quantiles']
            q10 = np.append(q10, np.array(f_dict['0.1']))
            q50 = np.append(q50, np.array(f_dict['0.5']))
            q90 = np.append(q90, np.array(f_dict['0.9']))
            indexes = np.append(indexes, np.array(list(forecast.index)))
        q10_ = np.vstack((q10_, q10))
        q50_ = np.vstack((q50_, q50))
        q90_ = np.vstack((q90_, q90))
        indexes_ = np.vstack((indexes_, indexes))

    if save:
        save_file = f"./save/{type(estimator).__name__}_{dataset_name}_{horizon}"
        np.savetxt(f"{save_file}_q10.txt", q10_)
        np.savetxt(f"{save_file}_q50.txt", q50_)
        np.savetxt(f"{save_file}_q90.txt", q90_)
        # Timestamps need a string format; the float default would fail.
        np.savetxt(f"{save_file}_index.txt", indexes_, fmt='%s')

    print("Calculating time series prediction metrics ...")
    agg_metrics, item_metrics = Evaluator()(iter(tss),
                                            iter(forecasts),
                                            num_series=len(test_ds))

    pprint.pprint(agg_metrics)

    # Annotate the aggregate metrics with the run identity and return them.
    agg_metrics["dataset"] = dataset_name
    agg_metrics["estimator"] = type(estimator).__name__
    agg_metrics["horizon"] = str(horizon)
    return agg_metrics