Example #1
def test_studentT_likelihood(
    mu_sigma_nu: Tuple[float, float, float], hybridize: bool
) -> None:
    """
    Test to check that maximizing the likelihood recovers the parameters
    """
    # test instance
    mu, sigma, nu = mu_sigma_nu

    # generate samples
    mus = mx.nd.zeros((NUM_SAMPLES, 1)) + mu
    sigmas = mx.nd.zeros((NUM_SAMPLES, 1)) + sigma
    nus = mx.nd.zeros((NUM_SAMPLES, 1)) + nu

    distr = StudentT(mus, sigmas, nus)
    samples = distr.sample()

    # nu takes very long to learn, so we initialize it at the true value.
    # transform used is softplus(x) + 2
    init_bias = [
        mu - START_TOL_MULTIPLE * TOL * mu,
        inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
        inv_softplus(nu - 2),
    ]

    mu_hat, sigma_hat, nu_hat = maximum_likelihood_estimate_sgd(
        StudentTOutput(),
        samples,
        init_biases=init_bias,
        hybridize=hybridize,
        num_epochs=PositiveInt(10),
        learning_rate=1e-2,
    )

    assert (
        np.abs(mu_hat - mu) < TOL * mu
    ), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
    assert (
        np.abs(sigma_hat - sigma) < TOL * sigma
    ), f"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
    assert (
        np.abs(nu_hat - nu) < TOL * nu
    ), f"nu did not match: nu = {nu}, nu_hat = {nu_hat}"
Example #2
def test_shape():
    """
    Makes sure additional tensors can be accessed and have expected shapes
    """
    prediction_length = ds_info.prediction_length
    estimator = DeepAREstimator(
        freq=freq,
        prediction_length=prediction_length,
        trainer=Trainer(epochs=1, num_batches_per_epoch=1),
        distr_output=StudentTOutput(),
    )

    training_transformation, trained_net = estimator.train_model(train_ds)

    # todo adapt loader to anomaly detection use-case
    batch_size = 2
    training_data_loader = TrainDataLoader(
        dataset=train_ds,
        transform=training_transformation,
        batch_size=batch_size,
        num_batches_per_epoch=estimator.trainer.num_batches_per_epoch,
        ctx=mx.cpu(),
    )

    seq_len = 2 * ds_info.prediction_length

    for data_entry in islice(training_data_loader, 1):
        input_names = get_hybrid_forward_input_names(trained_net)

        loss, likelihoods, *distr_args = trained_net(
            *[data_entry[k] for k in input_names]
        )

        distr = StudentT(*distr_args)

        assert likelihoods.shape == (batch_size, seq_len)
        assert distr.mu.shape == (batch_size, seq_len)
        assert distr.sigma.shape == (batch_size, seq_len)
        assert distr.nu.shape == (batch_size, seq_len)
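
The names freq, ds_info, and train_ds are fixtures defined elsewhere in the test module. A plausible setup, assuming gluonts' artificial constant_dataset (the exact dataset is an assumption, not shown in the listing):

from gluonts.dataset.artificial import constant_dataset

ds_info, train_ds, test_ds = constant_dataset()
freq = ds_info.metadata.freq
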
Example #3
@pytest.mark.parametrize(
    "distr1, distr2, p",
    [
        (
            Gaussian(
                mu=mx.nd.zeros(shape=SHAPE),
                sigma=1e-3 + 0.2 * mx.nd.ones(shape=SHAPE),
            ),
            Gaussian(
                mu=mx.nd.ones(shape=SHAPE),
                sigma=1e-3 + 0.1 * mx.nd.ones(shape=SHAPE),
            ),
            0.2 * mx.nd.ones(shape=SHAPE),
        ),
        (
            StudentT(
                mu=mx.nd.ones(shape=SHAPE),
                sigma=1e-1 + mx.nd.zeros(shape=SHAPE),
                nu=mx.nd.zeros(shape=SHAPE) + 2.2,
            ),
            Gaussian(
                mu=-mx.nd.ones(shape=SHAPE),
                sigma=1e-1 + mx.nd.zeros(shape=SHAPE),
            ),
            mx.nd.random_uniform(shape=SHAPE),
        ),
        # TODO: add a multivariate case here
    ],
)
def test_mixture(
    distr1: Distribution, distr2: Distribution, p: Tensor
) -> None:
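    # The listing truncates the body here. A plausible sketch, assuming
    # MixtureDistribution from gluonts.mx.distribution, which takes the
    # per-component probabilities stacked along a trailing axis:
    mixture = MixtureDistribution(
        mixture_probs=mx.nd.stack(p, 1 - p, axis=-1),
        components=[distr1, distr2],
    )
    samples = mixture.sample()
    assert samples.shape == distr1.batch_shape + distr1.event_shape
    # log_prob is evaluated pointwise over the batch shape
    assert mixture.log_prob(samples).shape == distr1.batch_shape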
Example #4
@pytest.mark.parametrize(
    "distr1, distr2, p",
    [
        (
            Gaussian(
                mu=mx.nd.zeros(shape=(3, 4, 5)),
                sigma=1e-3 + 0.2 * mx.nd.ones(shape=(3, 4, 5)),
            ),
            Gaussian(
                mu=mx.nd.ones(shape=(3, 4, 5)),
                sigma=1e-3 + 0.1 * mx.nd.ones(shape=(3, 4, 5)),
            ),
            0.2 * mx.nd.ones(shape=(3, 4, 5)),
        ),
        (
            StudentT(
                mu=mx.nd.ones(shape=(3, 4, 5)),
                sigma=1e-1 + mx.nd.zeros(shape=(3, 4, 5)),
                nu=mx.nd.ones(shape=(3, 4, 5)),
            ),
            Gaussian(
                mu=-mx.nd.ones(shape=(3, 4, 5)),
                sigma=1e-1 + mx.nd.zeros(shape=(3, 4, 5)),
            ),
            mx.nd.random_uniform(shape=(3, 1, 5)),
        ),
        # TODO: add a multivariate case here
    ],
)
def test_mixture(
    distr1: Distribution, distr2: Distribution, p: Tensor
) -> None:
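    # Body as in Example #3. The variation: the mixing probabilities p have
    # shape (3, 1, 5) and must broadcast against the (3, 4, 5) batch shape
    # of the components, and the StudentT component uses nu = 1, i.e. a
    # Cauchy component whose mean is undefined.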
Example #5
    ),
    Gaussian(
        mu=mx.nd.zeros(shape=BATCH_SHAPE),
        sigma=mx.nd.ones(shape=BATCH_SHAPE),
    ),
    Gamma(
        alpha=mx.nd.ones(shape=BATCH_SHAPE),
        beta=mx.nd.ones(shape=BATCH_SHAPE),
    ),
    Beta(
        alpha=0.5 * mx.nd.ones(shape=BATCH_SHAPE),
        beta=0.5 * mx.nd.ones(shape=BATCH_SHAPE),
    ),
    StudentT(
        mu=mx.nd.zeros(shape=BATCH_SHAPE),
        sigma=mx.nd.ones(shape=BATCH_SHAPE),
        nu=mx.nd.ones(shape=BATCH_SHAPE),
    ),
    Dirichlet(alpha=mx.nd.ones(shape=BATCH_SHAPE)),
    Laplace(
        mu=mx.nd.zeros(shape=BATCH_SHAPE), b=mx.nd.ones(shape=BATCH_SHAPE)
    ),
    NegativeBinomial(
        mu=mx.nd.zeros(shape=BATCH_SHAPE),
        alpha=mx.nd.ones(shape=BATCH_SHAPE),
    ),
    Poisson(rate=mx.nd.ones(shape=BATCH_SHAPE)),
    Uniform(
        low=-mx.nd.ones(shape=BATCH_SHAPE),
        high=mx.nd.ones(shape=BATCH_SHAPE),
    ),
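
The listing truncates this parametrization at both ends; BATCH_SHAPE is a module-level constant. A minimal sketch of the kind of shape check such a list typically feeds, assuming the gluonts Distribution base class and a hypothetical check_sample_shape harness:

from gluonts.mx.distribution import Distribution

BATCH_SHAPE = (3, 4, 5)  # assumed value of the module constant

def check_sample_shape(distr: Distribution) -> None:
    # sample() returns an array of shape batch_shape + event_shape
    samples = distr.sample()
    assert samples.shape == distr.batch_shape + distr.event_shape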