Example #1
# NOTE: in the original suite these arguments are supplied by
# @pytest.mark.parametrize decorators, omitted from this excerpt.
def test_simple(
    train_window, min_train_window, test_window, min_test_window, stride, warm_start
):
    duration = 30
    obs_dim = 2
    covariates = torch.zeros(duration, 0)
    data = torch.randn(duration, obs_dim) + 4
    forecaster_options = {"num_steps": 2, "warm_start": warm_start}

    # backtest() rejects warm_start combined with a sliding train_window.
    expect_error = warm_start and train_window is not None
    with optional(pytest.raises(ValueError), expect_error):
        windows = backtest(
            data,
            covariates,
            Model,
            train_window=train_window,
            min_train_window=min_train_window,
            test_window=test_window,
            min_test_window=min_test_window,
            stride=stride,
            forecaster_options=forecaster_options,
        )
    if not expect_error:
        assert any(window["t0"] == 0 for window in windows)
        if stride == 1:
            assert any(window["t2"] == duration for window in windows)
        for window in windows:
            assert window["train_walltime"] >= 0
            assert window["test_walltime"] >= 0
            for name in DEFAULT_METRICS:
                assert name in window
                assert 0 < window[name] < math.inf
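
These tests lean on names defined elsewhere in the suite: Model, optional, backtest, and DEFAULT_METRICS. Below is a minimal sketch of that assumed context; the real fixtures may differ. Model follows the standard pyro.contrib.forecast.ForecastingModel pattern, and optional is assumed to be a small test helper that enters a context manager only when a condition holds.

import contextlib
import math

import pytest
import torch

import pyro
import pyro.distributions as dist
from pyro.contrib.forecast import ForecastingModel, backtest
from pyro.contrib.forecast.evaluate import DEFAULT_METRICS  # assumed location


class Model(ForecastingModel):
    """A toy forecasting model: learned per-dimension bias plus Gaussian noise."""

    def model(self, zero_data, covariates):
        data_dim = zero_data.size(-1)
        bias = pyro.sample("bias", dist.Normal(0, 10).expand([data_dim]).to_event(1))
        noise_scale = pyro.sample(
            "noise_scale", dist.LogNormal(-2, 2).expand([data_dim]).to_event(1)
        )
        # zero_data is all zeros, so this just broadcasts bias over time.
        prediction = bias + zero_data
        noise_dist = dist.Normal(0, noise_scale)
        self.predict(noise_dist, prediction)


def optional(context_manager, condition):
    """Enter context_manager only when condition is truthy."""
    return context_manager if condition else contextlib.nullcontext()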
Example #2
def test_custom_warm_start():
    duration = 30
    obs_dim = 2
    covariates = torch.zeros(duration, 0)
    data = torch.randn(duration, obs_dim) + 4
    min_train_window = 10

    def forecaster_options(t0, t1, t2):
        # Fit from scratch only on the first window; later windows take zero
        # SVI steps and simply reuse the warm-started parameters.
        if t1 == min_train_window:
            return {"num_steps": 2, "warm_start": True}
        else:
            return {"num_steps": 0, "warm_start": True}

    backtest(
        data,
        covariates,
        Model,
        min_train_window=min_train_window,
        test_window=10,
        forecaster_options=forecaster_options,
    )
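
As this example shows, forecaster_options may be either a dict or a callable taking the window boundaries (t0, t1, t2) and returning a dict, so training hyperparameters can vary per backtesting window. A rough sketch of how such a per-window lookup behaves (an illustration, not Pyro's exact internals):

def resolve_options(forecaster_options, t0, t1, t2):
    # Per-window resolution: a callable is queried with this window's
    # boundaries; a plain dict is shared across all windows.
    if callable(forecaster_options):
        return forecaster_options(t0, t1, t2)
    return dict(forecaster_options)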
Example #3
# NOTE: as in Example #1, these arguments come from pytest parametrization
# omitted from this excerpt.
def test_poisson(
    train_window, min_train_window, test_window, min_test_window, stride, engine
):
    duration = 30
    obs_dim = 2
    covariates = torch.zeros(duration, 0)
    # Clamp to keep the Poisson rate strictly positive; randn + 4 can
    # (rarely) dip below zero, which Poisson would reject.
    rate = (torch.randn(duration, obs_dim) + 4).clamp(min=0.01)
    counts = dist.Poisson(rate).sample()

    # Transform count data to log domain.
    data = counts.log1p()

    def transform(pred, truth):
        # Map prediction and truth back to the count domain for evaluation:
        # invert log1p for truth, sample counts around the back-transformed rate.
        pred = dist.Poisson(pred.clamp(min=1e-4).expm1()).sample()
        truth = truth.expm1()
        return pred, truth

    if engine == "svi":
        forecaster_fn = Forecaster
        forecaster_options = {"num_steps": 2}
    else:
        forecaster_fn = HMCForecaster
        forecaster_options = {"num_warmup": 1, "num_samples": 1}

    windows = backtest(
        data,
        covariates,
        Model,
        forecaster_fn=forecaster_fn,
        transform=transform,
        train_window=train_window,
        min_train_window=min_train_window,
        test_window=test_window,
        min_test_window=min_test_window,
        stride=stride,
        forecaster_options=forecaster_options,
    )

    assert any(window["t0"] == 0 for window in windows)
    if stride == 1:
        assert any(window["t0"] == 0 for window in windows)
        assert any(window["t2"] == duration for window in windows)
    for name in DEFAULT_METRICS:
        for window in windows:
            assert name in window
            assert 0 < window[name] < math.inf
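
The transform hook receives predicted samples and held-out truth in the model's (log) domain and must return both in the domain where metrics should be computed. A small standalone illustration of this count-domain round trip (values chosen arbitrarily):

import torch
import pyro.distributions as dist

counts = torch.tensor([0.0, 3.0, 10.0])
logged = counts.log1p()   # what the model trains on
truth = logged.expm1()    # exact inverse: recovers counts
# Clamping before expm1 keeps the Poisson rate strictly positive.
pred = dist.Poisson(logged.clamp(min=1e-4).expm1()).sample()
# truth equals counts exactly; pred is an integer-valued Poisson draw
# whose rate is the back-transformed value.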
Example #4
def main(args):
    data, covariates = preprocess(args)

    # We model positive count data by log1p-transforming it into real-valued
    # data. But since we want to evaluate back in the count domain, we also
    # define a transform to apply during evaluation, mapping real-valued
    # predictions back to counts. Truth is mapped by expm1(), the inverse of
    # log1p(); the prediction is sampled from a Poisson distribution.
    data = data.log1p()

    def transform(pred, truth):
        pred = torch.poisson(pred.clamp(min=1e-4).expm1())
        truth = truth.expm1()
        return pred, truth

    # The backtest() function automatically trains and evaluates our model on
    # different windows of data.
    forecaster_options = {
        "num_steps": args.num_steps,
        "learning_rate": args.learning_rate,
        "log_every": args.log_every,
        "dct_gradients": args.dct,
    }
    metrics = backtest(
        data,
        covariates,
        Model,
        train_window=args.train_window,
        test_window=args.test_window,
        stride=args.stride,
        num_samples=args.num_samples,
        forecaster_options=forecaster_options,
    )

    for name in ["mae", "rmse", "crps"]:
        values = [m[name] for m in metrics]
        mean = np.mean(values)
        std = np.std(values)
        print("{} = {:0.3g} +- {:0.3g}".format(name, mean, std))
    return metrics
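
main() expects an args namespace carrying the attributes read above, and the function body additionally assumes numpy as np, torch, and the preprocess/Model/backtest names used elsewhere on this page. A hypothetical entry point supplying those attributes via argparse (flag names and defaults are assumptions inferred from the attributes accessed, not taken from the original script):

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="backtest() example")
    # argparse maps --num-steps to args.num_steps, and so on.
    parser.add_argument("--num-steps", type=int, default=1000)
    parser.add_argument("--learning-rate", type=float, default=0.01)
    parser.add_argument("--log-every", type=int, default=100)
    parser.add_argument("--dct", action="store_true")
    parser.add_argument("--train-window", type=int, default=None)
    parser.add_argument("--test-window", type=int, default=7)
    parser.add_argument("--stride", type=int, default=1)
    parser.add_argument("--num-samples", type=int, default=100)
    main(parser.parse_args())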