Example 1
0
def test_smoke(
    hybridize: bool, target_dim_sample: int, use_marginal_transformation: bool
):
    """Smoke-test GPVAREstimator: short training run plus a multivariate
    backtest, asserting only a loose bound on the ND metric."""
    trainer = Trainer(
        epochs=2,
        batch_size=10,
        learning_rate=1e-4,
        num_batches_per_epoch=1,
        hybridize=hybridize,
    )
    estimator = GPVAREstimator(
        distr_output=LowrankGPOutput(rank=2),
        num_cells=1,
        num_layers=1,
        pick_incomplete=True,
        prediction_length=metadata.prediction_length,
        target_dim=target_dim,
        target_dim_sample=target_dim_sample,
        freq=metadata.freq,
        use_marginal_transformation=use_marginal_transformation,
        trainer=trainer,
    )

    predictor = estimator.train(training_data=dataset.train)

    evaluator = MultivariateEvaluator(
        quantiles=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
    )
    agg_metrics, _ = backtest_metrics(
        test_dataset=dataset.test,
        predictor=predictor,
        num_samples=10,
        evaluator=evaluator,
    )

    # Loose sanity bound — we only check the pipeline runs end to end.
    assert agg_metrics["ND"] < 2.5
def test_listing_1():
    """
    Test GluonTS paper examples from arxiv paper:
    https://arxiv.org/abs/1906.05264

    Listing 1
    """
    from gluonts.dataset.repository.datasets import get_dataset
    from gluonts.evaluation import backtest_metrics, Evaluator
    from gluonts.model.deepar import DeepAREstimator
    from gluonts.mx.trainer import Trainer

    # We use electricity in the paper but that would take too long to run in
    # the unit test
    dataset_info, train_ds, test_ds = constant_dataset()

    estimator = DeepAREstimator(
        freq=dataset_info.metadata.freq,
        prediction_length=1,
        trainer=Trainer(epochs=1, batch_size=32),
    )
    predictor = estimator.train(train_ds)

    # No assertion: the listing only demonstrates the train/backtest flow.
    agg_metrics, item_metrics = backtest_metrics(
        test_dataset=test_ds,
        predictor=predictor,
        evaluator=Evaluator(quantiles=(0.1, 0.5, 0.9)),
    )
Example 3
0
def test_forecasts(method_name):
    """Check type, metadata and accuracy of RForecastPredictor forecasts."""
    if method_name == "mlp":
        # https://stackoverflow.com/questions/56254321/error-in-ifncol-matrix-rep-argument-is-of-length-zero
        # https://cran.r-project.org/web/packages/neuralnet/index.html
        #   published before the bug fix: https://github.com/bips-hb/neuralnet/pull/21
        # The issue is still open on nnfor package: https://github.com/trnnick/nnfor/issues/8
        # TODO: look for a workaround.
        pytest.xfail(
            "MLP currently does not work because "
            "the `neuralnet` package is not yet updated with a known bug fix in ` bips-hb/neuralnet`"
        )

    dataset = datasets.get_dataset("constant")

    train_dataset = dataset.train
    test_dataset = dataset.test
    metadata = dataset.metadata

    freq = metadata.freq
    prediction_length = metadata.prediction_length

    predictor = RForecastPredictor(
        freq=freq,
        prediction_length=prediction_length,
        method_name=method_name,
    )
    predictions = list(predictor.predict(train_dataset))

    # Quantile-based R methods wrap their output differently.
    if method_name in QUANTILE_FORECAST_METHODS:
        expected_type = QuantileForecast
    else:
        expected_type = SampleForecast

    for prediction in predictions:
        assert isinstance(prediction, expected_type)

    for prediction in predictions:
        assert prediction.freq == freq

    for prediction in predictions:
        assert prediction.prediction_length == prediction_length

    for data, prediction in zip(train_dataset, predictions):
        assert prediction.start_date == forecast_start(data)

    agg_metrics, item_metrics = backtest_metrics(
        test_dataset=test_dataset,
        predictor=predictor,
        evaluator=Evaluator(),
    )
    # The constant dataset is trivial, so every error metric should be tiny.
    for metric in ("mean_wQuantileLoss", "NRMSE", "RMSE"):
        assert agg_metrics[metric] < TOLERANCE
Example 4
0
def test_accuracy(predictor_cls, parameters, accuracy):
    """Backtest `predictor_cls` on the constant dataset; ND must stay
    within the supplied `accuracy` bound."""
    predictor = predictor_cls(freq=CONSTANT_DATASET_FREQ, **parameters)
    evaluator = Evaluator(calculate_owa=True)
    agg_metrics, item_metrics = backtest_metrics(
        test_dataset=constant_test_ds,
        predictor=predictor,
        evaluator=evaluator,
    )
    assert agg_metrics["ND"] <= accuracy
Example 5
0
    def test_accuracy(Estimator, hyperparameters, accuracy):
        """Train `Estimator` from hyperparameters and bound the backtest ND."""
        estimator = from_hyperparameters(Estimator, hyperparameters, dsinfo)
        predictor = estimator.train(training_data=dsinfo.train_ds)
        # OWA needs statsmodels; skip it when the package is unavailable.
        evaluator = Evaluator(
            calculate_owa=statsmodels is not None, num_workers=0
        )
        agg_metrics, item_metrics = backtest_metrics(
            test_dataset=dsinfo.test_ds,
            predictor=predictor,
            evaluator=evaluator,
        )

        # The synthetic dataset is noisier, so the bound is relaxed there.
        if dsinfo.name == "synthetic":
            accuracy = 10.0

        assert agg_metrics["ND"] <= accuracy
Example 6
0
def test_localizer():
    """Localizer should fit one MeanEstimator per series and backtest cleanly."""
    # Three short, nearly-constant series with distinct ids.
    entries = [
        {
            "start": "2012-01-01",
            "target": (np.zeros(20) + i * 0.1 + 0.01),
            "id": f"{i}",
        }
        for i in range(3)
    ]
    dataset = ListDataset(data_iter=entries, freq="1H")

    estimator = MeanEstimator(prediction_length=10, freq="1H", num_samples=50)
    localized_predictor = Localizer(estimator=estimator)

    agg_metrics, _ = backtest_metrics(
        test_dataset=dataset, predictor=localized_predictor
    )
Example 7
0
def test_deepvar(
    distr_output,
    num_batches_per_epoch,
    Estimator,
    hybridize,
    use_marginal_transformation,
):
    """
    Train a multivariate (DeepVAR-family) estimator for a single epoch and
    sanity-check the aggregate ND metric from a multivariate backtest.
    """
    estimator = Estimator(
        num_cells=20,
        num_layers=1,
        pick_incomplete=True,
        target_dim=target_dim,
        prediction_length=metadata.prediction_length,
        freq=metadata.freq,
        distr_output=distr_output,
        scaling=False,
        use_marginal_transformation=use_marginal_transformation,
        trainer=Trainer(
            # Single epoch with a near-zero learning rate: this only checks
            # that the train/predict pipeline runs, not model quality.
            epochs=1,
            batch_size=8,
            learning_rate=1e-10,
            minimum_learning_rate=1e-13,
            num_batches_per_epoch=num_batches_per_epoch,
            hybridize=hybridize,
        ),
    )

    predictor = estimator.train(training_data=dataset.train)

    agg_metrics, _ = backtest_metrics(
        test_dataset=dataset.test,
        predictor=predictor,
        evaluator=MultivariateEvaluator(
            quantiles=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
        ),
    )

    # Loose bound: training is effectively a no-op at lr=1e-10.
    assert agg_metrics["ND"] < 1.5
Example 8
0
def test_general_functionality() -> None:
    """End-to-end DeepAR smoke test on the constant dataset."""
    ds_info, train_ds, test_ds = constant_dataset()

    estimator = DeepAREstimator(
        prediction_length=ds_info.prediction_length,
        freq=ds_info.metadata.freq,
        trainer=Trainer(epochs=3, num_batches_per_epoch=5),
    )

    predictor = estimator.train(training_data=train_ds)

    agg_metrics, item_metrics = backtest_metrics(
        test_dataset=test_ds,
        predictor=predictor,
        evaluator=Evaluator(calculate_owa=False),
    )

    # just some sanity check
    assert (
        agg_metrics is not None and item_metrics is not None
    ), "Metrics should not be None if everything went smooth."
def test_deepvar_hierarchical(
    likelihood_weight,
    CRPS_weight,
    sample_LH,
    coherent_train_samples,
    coherent_pred_samples,
    warmstart_epoch_frac,
):
    """Train DeepVARHierarchical briefly and bound the backtest ND metric."""
    meta = train_datasets.metadata
    trainer = Trainer(
        epochs=10,
        num_batches_per_epoch=1,
        hybridize=False,
    )
    estimator = DeepVARHierarchicalEstimator(
        freq=meta.freq,
        # One target dimension per row of the aggregation matrix S.
        prediction_length=prediction_length,
        target_dim=meta.S.shape[0],
        S=meta.S,
        likelihood_weight=likelihood_weight,
        CRPS_weight=CRPS_weight,
        sample_LH=sample_LH,
        coherent_train_samples=coherent_train_samples,
        coherent_pred_samples=coherent_pred_samples,
        warmstart_epoch_frac=warmstart_epoch_frac,
        trainer=trainer,
        num_samples_for_loss=10,
    )

    predictor = estimator.train(training_data=train_datasets.train)

    evaluator = MultivariateEvaluator(
        quantiles=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
    )
    agg_metrics, _ = backtest_metrics(
        test_dataset=train_datasets.test,
        predictor=predictor,
        evaluator=evaluator,
    )

    assert agg_metrics["ND"] < 1.5
Example 10
0
def test_appendix_c():
    """
    Test GluonTS paper examples from arxiv paper:
    https://arxiv.org/abs/1906.05264

    Appendix C
    """
    from typing import List

    from mxnet import gluon

    from gluonts.core.component import validated
    from gluonts.mx.model.estimator import GluonEstimator
    from gluonts.model.predictor import Predictor
    from gluonts.mx.model.predictor import RepresentableBlockPredictor
    from gluonts.mx.trainer import Trainer
    from gluonts.mx.util import copy_parameters
    from gluonts.transform import (
        ExpectedNumInstanceSampler,
        InstanceSplitter,
        Transformation,
    )

    class MyTrainNetwork(gluon.HybridBlock):
        """Feed-forward network mapping a past-target window to a
        prediction_length-sized output, trained with an L1 loss."""

        def __init__(self, prediction_length, cells, act_type, **kwargs):
            super().__init__(**kwargs)
            self.prediction_length = prediction_length
            with self.name_scope():
                # Set up a network that predicts the target
                self.nn = gluon.nn.HybridSequential()
                for c in cells:
                    self.nn.add(gluon.nn.Dense(units=c, activation=act_type))
                    # NOTE(review): this output-sized layer is added on every
                    # loop iteration; in the paper's listing it is added once,
                    # after the loop. The final layer still has
                    # prediction_length units either way — confirm intent.
                    self.nn.add(
                        gluon.nn.Dense(units=self.prediction_length,
                                       activation=act_type))

        def hybrid_forward(self, F, past_target, future_target):
            prediction = self.nn(past_target)
            # calculate L1 loss to learn the median
            return (prediction - future_target).abs().mean(axis=-1)

    class MyPredNetwork(MyTrainNetwork):
        # The prediction network only receives
        # past target and returns predictions
        def hybrid_forward(self, F, past_target):
            prediction = self.nn(past_target)
            # Add a singleton sample axis: (batch, prediction_length)
            # -> (batch, 1, prediction_length).
            return prediction.expand_dims(axis=1)

    class MyEstimator(GluonEstimator):
        """Minimal custom estimator wiring the networks above into the
        GluonEstimator train/predict workflow."""

        @validated()
        def __init__(
                self,
                freq: str,
                prediction_length: int,
                act_type: str = "relu",
                context_length: int = 30,
                # NOTE(review): mutable default; presumably safe because
                # @validated() re-validates arguments per call — confirm.
                cells: List[int] = [40, 40, 40],
                trainer: Trainer = Trainer(epochs=10),
        ) -> None:
            super().__init__(trainer=trainer)
            self.freq = freq
            self.prediction_length = prediction_length
            self.act_type = act_type
            self.context_length = context_length
            self.cells = cells

        def create_training_network(self) -> MyTrainNetwork:
            # Fresh training network configured from the estimator's fields.
            return MyTrainNetwork(
                prediction_length=self.prediction_length,
                cells=self.cells,
                act_type=self.act_type,
            )

        def create_predictor(
            self,
            transformation: Transformation,
            trained_network: gluon.HybridBlock,
        ) -> Predictor:
            # Build a prediction network with the same architecture and copy
            # the trained weights into it.
            prediction_network = MyPredNetwork(
                prediction_length=self.prediction_length,
                cells=self.cells,
                act_type=self.act_type,
            )

            copy_parameters(trained_network, prediction_network)

            return RepresentableBlockPredictor(
                input_transform=transformation,
                prediction_net=prediction_network,
                batch_size=self.trainer.batch_size,
                freq=self.freq,
                prediction_length=self.prediction_length,
                ctx=self.trainer.ctx,
            )

        def create_transformation(self):
            # Model specific input transform
            # Here we use a transformation that randomly
            # selects training samples from all series.
            return InstanceSplitter(
                target_field=FieldName.TARGET,
                is_pad_field=FieldName.IS_PAD,
                start_field=FieldName.START,
                forecast_start_field=FieldName.FORECAST_START,
                train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                past_length=self.context_length,
                future_length=self.prediction_length,
            )

    from gluonts.evaluation import backtest_metrics, Evaluator
    from gluonts.mx.trainer import Trainer

    dataset_info, train_ds, test_ds = constant_dataset()

    meta = dataset_info.metadata
    estimator = MyEstimator(
        freq=meta.freq,
        prediction_length=1,
        trainer=Trainer(epochs=1, batch_size=32),
    )
    predictor = estimator.train(train_ds)

    # No assertion: the appendix example only demonstrates the workflow.
    evaluator = Evaluator(quantiles=(0.1, 0.5, 0.9))
    agg_metrics, item_metrics = backtest_metrics(
        test_dataset=test_ds,
        predictor=predictor,
        evaluator=evaluator,
    )
Example 11
0
        trainer=Trainer(epochs=10, num_batches_per_epoch=10),
    )

    # Train a first model; train_model returns (among other things) the
    # trained network and a ready-made predictor.
    train1_output = estimator.train_model(dataset.train)

    # callback to overwrite parameters of the new model with the already trained model
    def copy_params(net):
        # Copy parameters position-by-position; assumes both networks share
        # the same architecture so collect_params() yields aligned orders —
        # TODO confirm.
        params1 = train1_output.trained_net.collect_params()
        params2 = net.collect_params()
        for p1, p2 in zip(params1.values(), params2.values()):
            p2.set_data(p1.data())

    # Second estimator, warm-started from the first model's weights through
    # the trainer's post_initialize_cb hook.
    estimator = SimpleFeedForwardEstimator(
        prediction_length=dataset.metadata.prediction_length,
        freq=dataset.metadata.freq,
        trainer=Trainer(
            epochs=5, num_batches_per_epoch=10, post_initialize_cb=copy_params
        ),
    )

    new_pred = estimator.train(dataset.train)

    # num_workers=0 keeps evaluation in-process.
    ev = Evaluator(num_workers=0)
    agg_metrics1, _ = backtest_metrics(
        dataset.test, train1_output.predictor, evaluator=ev
    )
    agg_metrics2, _ = backtest_metrics(dataset.test, new_pred, evaluator=ev)

    # Print the two models' aggregate metrics side by side for comparison.
    df = pd.DataFrame([agg_metrics1, agg_metrics2], index=["model1", "model2"])
    print(df)
Example 12
0
    # Evaluate every estimator on every dataset and persist the aggregate
    # metrics to disk.
    for dataset_name in datasets:
        for Estimator in Estimators:
            # regenerate=False reuses a previously downloaded copy under
            # ../datasets/ if present.
            dataset = get_dataset(
                dataset_name=dataset_name,
                regenerate=False,
                path=Path("../datasets/"),
            )

            # Estimators are configured only from the dataset's horizon and
            # frequency; all other hyperparameters use their defaults.
            estimator = Estimator(
                prediction_length=dataset.metadata.prediction_length,
                freq=dataset.metadata.freq,
            )

            estimator_name = type(estimator).__name__

            print(f"evaluating {estimator_name} on {dataset_name}")

            # NOTE(review): passing the untrained estimator via `forecaster=`
            # together with `train_dataset=` lets backtest_metrics train it
            # internally — this is the older backtest API; confirm it matches
            # the installed gluonts version.
            agg_metrics, item_metrics = backtest_metrics(
                train_dataset=dataset.train,
                test_dataset=dataset.test,
                forecaster=estimator,
            )

            persist_evaluation(
                estimator_name=estimator_name,
                dataset=dataset_name,
                evaluation=agg_metrics,
                evaluation_path=dir_path,
            )