def test_studentT_likelihood(
    mu: float, sigma: float, nu: float, hybridize: bool
) -> None:
    """
    Test to check that maximizing the likelihood recovers the parameters
    """

    # generate samples
    mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
    sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma
    nus = mx.nd.zeros((NUM_SAMPLES,)) + nu

    distr = StudentT(mus, sigmas, nus)
    samples = distr.sample()

    # nu takes very long to learn, so we initialize it at the true value.
    # transform used is softplus(x) + 2
    init_bias = [
        mu - START_TOL_MULTIPLE * TOL * mu,
        inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
        inv_softplus(nu - 2),
    ]

    mu_hat, sigma_hat, nu_hat = maximum_likelihood_estimate_sgd(
        StudentTOutput(),
        samples,
        init_biases=init_bias,
        hybridize=hybridize,
        num_epochs=PositiveInt(10),
        learning_rate=PositiveFloat(1e-2),
    )

    assert (
        np.abs(mu_hat - mu) < TOL * mu
    ), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
    assert (
        np.abs(sigma_hat - sigma) < TOL * sigma
    ), f"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
    assert (
        np.abs(nu_hat - nu) < TOL * nu
    ), f"nu did not match: nu = {nu}, nu_hat = {nu_hat}"
Example #2
@pytest.mark.parametrize(
    "distr1, distr2, p",
    [
        (
            Gaussian(
                mu=mx.nd.zeros(shape=SHAPE),
                sigma=1e-3 + 0.2 * mx.nd.ones(shape=SHAPE),
            ),
            Gaussian(
                mu=mx.nd.ones(shape=SHAPE),
                sigma=1e-3 + 0.1 * mx.nd.ones(shape=SHAPE),
            ),
            0.2 * mx.nd.ones(shape=SHAPE),
        ),
        (
            StudentT(
                mu=mx.nd.ones(shape=SHAPE),
                sigma=1e-1 + mx.nd.zeros(shape=SHAPE),
                nu=mx.nd.zeros(shape=SHAPE) + 2.2,
            ),
            Gaussian(
                mu=-mx.nd.ones(shape=SHAPE),
                sigma=1e-1 + mx.nd.zeros(shape=SHAPE),
            ),
            mx.nd.random_uniform(shape=SHAPE),
        ),
        # TODO: add a multivariate case here
    ],
)
@pytest.mark.parametrize("serialize_fn", serialize_fn_list)
def test_mixture(
    distr1: Distribution, distr2: Distribution, p: Tensor, serialize_fn
) -> None:
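    # The body of this example is truncated in the source. A minimal
    # assumed sketch (not the original body): build the mixture,
    # round-trip it through serialize_fn, and check sample/log_prob shapes.
    mixture = MixtureDistribution(
        mixture_probs=mx.nd.stack(p, 1.0 - p, axis=-1),
        components=[distr1, distr2],
    )
    mixture = serialize_fn(mixture)
    samples = mixture.sample()
    assert samples.shape == SHAPE
    assert mixture.log_prob(samples).shape == SHAPE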
Example #3
@pytest.mark.parametrize(
    "distr",
    [
        Gaussian(
            mu=mx.nd.zeros(shape=BATCH_SHAPE),
            sigma=mx.nd.ones(shape=BATCH_SHAPE),
        ),
        Gamma(
            alpha=mx.nd.ones(shape=BATCH_SHAPE),
            beta=mx.nd.ones(shape=BATCH_SHAPE),
        ),
        Beta(
            alpha=0.5 * mx.nd.ones(shape=BATCH_SHAPE),
            beta=0.5 * mx.nd.ones(shape=BATCH_SHAPE),
        ),
        StudentT(
            mu=mx.nd.zeros(shape=BATCH_SHAPE),
            sigma=mx.nd.ones(shape=BATCH_SHAPE),
            nu=mx.nd.ones(shape=BATCH_SHAPE),
        ),
        Dirichlet(alpha=mx.nd.ones(shape=BATCH_SHAPE)),
        Laplace(
            mu=mx.nd.zeros(shape=BATCH_SHAPE),
            b=mx.nd.ones(shape=BATCH_SHAPE),
        ),
        NegativeBinomial(
            mu=mx.nd.zeros(shape=BATCH_SHAPE),
            alpha=mx.nd.ones(shape=BATCH_SHAPE),
        ),
        Poisson(rate=mx.nd.ones(shape=BATCH_SHAPE)),
        Uniform(
            low=-mx.nd.ones(shape=BATCH_SHAPE),
            high=mx.nd.ones(shape=BATCH_SHAPE),
        ),
        PiecewiseLinear(
            gamma=mx.nd.ones(shape=BATCH_SHAPE),
            # The original example is cut off at this point; the slopes and
            # knot_spacings values below are illustrative only (10 equally
            # spaced knots).
            slopes=mx.nd.ones(shape=BATCH_SHAPE + (10,)),
            knot_spacings=0.1 * mx.nd.ones(shape=BATCH_SHAPE + (10,)),
        ),
    ],
)
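# The test consuming this list is likewise truncated. A minimal assumed
# sketch (name and body are illustrative, not the original): check that
# sampling yields the declared batch and event shapes.
def test_sample_shape(distr: Distribution) -> None:
    samples = distr.sample()
    assert samples.shape == distr.batch_shape + distr.event_shape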
Example #4
@pytest.mark.parametrize(
    "distr, expected_batch_shape, expected_event_shape",
    [
        (
            Beta(
                alpha=mx.nd.ones(shape=(3, 4, 5)),
                beta=mx.nd.ones(shape=(3, 4, 5)),
            ),
            (3, 4, 5),
            (),
        ),
        (
            StudentT(
                mu=mx.nd.zeros(shape=(3, 4, 5)),
                sigma=mx.nd.ones(shape=(3, 4, 5)),
                nu=mx.nd.ones(shape=(3, 4, 5)),
            ),
            (3, 4, 5),
            (),
        ),
        (
            MultivariateGaussian(
                mu=mx.nd.zeros(shape=(3, 4, 5)),
                L=make_nd_diag(F=mx.nd, x=mx.nd.ones(shape=(3, 4, 5)), d=5),
            ),
            (3, 4),
            (5,),
        ),
        (Dirichlet(alpha=mx.nd.ones(shape=(3, 4, 5))), (3, 4), (5,)),
    ],
)
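# The parametrized test itself is cut off in the source. A minimal assumed
# sketch (test name and body are illustrative): each distribution should
# report the expected batch and event shapes.
def test_distribution_shapes(
    distr: Distribution,
    expected_batch_shape: tuple,
    expected_event_shape: tuple,
) -> None:
    assert distr.batch_shape == expected_batch_shape
    assert distr.event_shape == expected_event_shape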