def test_invalid_rolling_parameters(prediction_length, unique):
    """
    Invalid rolling-window parameters must be rejected.

    ``generate_rolling_dataset`` validates its arguments with ``assert``
    statements, so an invalid combination is expected to raise
    ``AssertionError``.

    Fix: the original wrapped the call in ``try/except AssertionError: pass``
    and then executed a bare ``pytest.raises(RuntimeError)``.  Called without
    a ``with`` block, ``pytest.raises`` only constructs a context manager and
    never fails the test — so the test silently passed even when no exception
    was raised at all.  Using ``pytest.raises`` as a context manager both
    asserts that the expected exception occurs and fails the test otherwise.
    """
    with pytest.raises(AssertionError):
        generate_rolling_dataset(
            dataset=generate_dataset("constant"),
            start_time=pd.Timestamp("2000-01-01-20", freq="1H"),
            end_time=pd.Timestamp("2000-01-02-00", freq="1H"),
            strategy=StepStrategy(
                # step_size == prediction_length yields non-overlapping
                # (unique) windows; step_size == 1 yields maximal overlap.
                step_size=prediction_length if unique else 1,
                prediction_length=prediction_length,
            ),
        )
def test_dynamic_features(
    test_length: int,
    prediction_length: int,
    target_start: str,
    rolling_start: str,
    num_dynamic_feat: int,
):
    """
    Rolling a dataset must cut its dynamic features in lockstep with the
    target.

    The fixture dataset is built so every dynamic feature carries exactly
    the same values as the target; after rolling, each feature must
    therefore still be element-wise equal to (and as long as) the rolled
    target.
    """
    rolled_ds = generate_rolling_dataset(
        dataset=create_dynamic_dataset(
            target_start, test_length, num_dynamic_feat
        ),
        strategy=StepStrategy(prediction_length=prediction_length),
        start_time=pd.Period(rolling_start),
    )

    dynamic_fields = (
        FieldName.FEAT_DYNAMIC_REAL,
        FieldName.FEAT_DYNAMIC_CAT,
    )
    for entry in rolled_ds:
        target = entry[FieldName.TARGET]
        for field in dynamic_fields:
            features = entry[field]
            assert len(features) == num_dynamic_feat
            # Target and features share the same values by construction,
            # so equality against the target checks both ordering and
            # length of every rolled feature.
            for feature in features:
                assert (target == feature).all()
def test_dynamic_integration(
    train_length: int,
    test_length: int,
    prediction_length: int,
    target_start: str,
    rolling_start: str,
    num_dynamic_feat: int,
):
    """
    Train a DeepAR estimator on data with dynamic features and evaluate it
    against a rolled test dataset end to end.

    Regression test for
    https://github.com/awslabs/gluon-ts/issues/1390
    """
    train_ds = create_dynamic_dataset(
        target_start, train_length, num_dynamic_feat
    )
    test_ds = create_dynamic_dataset(
        target_start, test_length, num_dynamic_feat
    )
    rolled_ds = generate_rolling_dataset(
        dataset=test_ds,
        strategy=StepStrategy(prediction_length=prediction_length),
        start_time=pd.Timestamp(rolling_start),
    )

    estimator = DeepAREstimator(
        freq="D",
        prediction_length=prediction_length,
        context_length=2 * prediction_length,
        use_feat_dynamic_real=True,
        trainer=Trainer(epochs=1),  # one epoch: smoke test, not accuracy
    )
    predictor = estimator.train(training_data=train_ds)

    forecast_it, ts_it = make_evaluation_predictions(
        rolled_ds, predictor=predictor, num_samples=100
    )
    training_agg_metrics, _ = Evaluator(num_workers=0)(ts_it, forecast_it)

    # If the rolled dynamic features were misaligned, prediction or
    # evaluation would have raised before this point; a truthy metrics
    # dict confirms the whole pipeline ran.
    assert training_agg_metrics
def test_step_strategy(ds_name, prediction_length, unique, ignore_end, ds_expected):
    """
    Rolling with ``StepStrategy`` must produce exactly the expected windows.

    Parameters
    ----------
    ds_name
        Name of the synthetic dataset to roll (passed to ``generate_dataset``).
    prediction_length
        Length of each forecast window.
    unique
        When True, use ``step_size == prediction_length`` so windows do not
        overlap; otherwise step by 1.
    ignore_end
        When True, roll to the end of each series instead of a fixed end time.
    ds_expected
        Expected target values, one sequence per rolled series, in order.
    """
    rolled_ds = generate_rolling_dataset(
        dataset=generate_dataset(ds_name),
        start_time=pd.Period("2000-01-01-20", freq="1H"),
        end_time=None if ignore_end else pd.Period("2000-01-02-00", freq="1H"),
        strategy=StepStrategy(
            step_size=prediction_length if unique else 1,
            prediction_length=prediction_length,
        ),
    )

    # Materialize the rolled dataset so the window count can be checked up
    # front.  The original kept a manual index counter and, when the rolled
    # dataset had more entries than expected, failed with an opaque
    # IndexError on ds_expected[i] rather than a clear assertion.
    rolled = list(rolled_ds)
    assert len(rolled) == len(ds_expected)

    for ts, expected_target in zip(rolled, ds_expected):
        assert len(ts["target"]) == len(expected_target)
        for rolled_value, expected_value in zip(ts["target"], expected_target):
            assert rolled_value == expected_value
StepStrategy, generate_rolling_dataset, ) if __name__ == "__main__": dataset = get_dataset("constant", regenerate=False) estimator = SimpleFeedForwardEstimator( prediction_length=dataset.metadata.prediction_length, freq=dataset.metadata.freq, trainer=Trainer(epochs=5, num_batches_per_epoch=10), ) predictor = estimator.train(dataset.train) # create the rolled dataset to use for forecasting and evaluation dataset_rolled = generate_rolling_dataset( dataset=dataset.test, start_time=pd.Timestamp("2000-01-01-15", freq="1H"), end_time=pd.Timestamp("2000-01-02-04", freq="1H"), strategy=StepStrategy( prediction_length=dataset.metadata.prediction_length, ), ) forecast_it, ts_it = make_evaluation_predictions( dataset_rolled, predictor=predictor, num_samples=len(dataset_rolled)) agg_metrics, _ = Evaluator()(ts_it, forecast_it) pprint(agg_metrics)