Example #1
# Imports for this excerpt (the pytest parametrization decorators, plus
# NUM_SAMPLES, TOL, START_TOL_MULTIPLE, inv_softplus and
# maximum_likelihood_estimate_sgd, are defined elsewhere in the test module).
from typing import Tuple

import mxnet as mx
import numpy as np

from gluonts.mx.distribution import Laplace, LaplaceOutput


def test_laplace(mu_b: Tuple[float, float], hybridize: bool) -> None:
    '''
    Test to check that maximizing the likelihood recovers the parameters
    '''
    # test instance
    mu, b = mu_b

    # generate samples
    mus = mx.nd.zeros((NUM_SAMPLES, )) + mu
    bs = mx.nd.zeros((NUM_SAMPLES, )) + b

    laplace_distr = Laplace(mu=mus, b=bs)
    samples = laplace_distr.sample()

    init_biases = [
        mu - START_TOL_MULTIPLE * TOL * mu,
        inv_softplus(b + START_TOL_MULTIPLE * TOL * b),
    ]

    mu_hat, b_hat = maximum_likelihood_estimate_sgd(LaplaceOutput(),
                                                    samples,
                                                    hybridize=hybridize,
                                                    init_biases=init_biases)

    assert (np.abs(mu_hat - mu) <
            TOL * mu), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
    assert (np.abs(b_hat - b) <
            TOL * b), f"b did not match: b = {b}, b_hat = {b_hat}"
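Aside: for the Laplace distribution the maximum-likelihood estimates also have a closed form, which is what the SGD fit above should converge to: mu_hat is the sample median and b_hat the mean absolute deviation around it. A minimal NumPy sketch (independent of GluonTS; all names here are illustrative):

import numpy as np

def laplace_mle(samples: np.ndarray):
    # Closed-form MLE for Laplace(mu, b):
    # mu_hat = sample median, b_hat = mean absolute deviation from it.
    mu_hat = np.median(samples)
    b_hat = np.mean(np.abs(samples - mu_hat))
    return mu_hat, b_hat

rng = np.random.default_rng(0)
mu_hat, b_hat = laplace_mle(rng.laplace(loc=2.3, scale=0.7, size=10_000))
print(mu_hat, b_hat)  # close to 2.3 and 0.7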
Example #2
def test_deterministic_l1(mu: float, hybridize: bool) -> None:
    '''
    Test to check that maximizing the likelihood recovers the parameters.
    This test uses the Laplace distribution with a fixed scale and estimates
    only the mean; this essentially reduces to deterministic L1 minimization.
    '''
    # generate samples
    mus = mx.nd.zeros(NUM_SAMPLES) + mu

    class LaplaceFixedVarianceOutput(LaplaceOutput):
        @classmethod
        def domain_map(cls, F, mu, b):
            b = 0.1 * F.ones_like(b)
            return mu.squeeze(axis=-1), b.squeeze(axis=-1)

    deterministic_distr = Laplace(mu=mus, b=0.1 * mx.nd.ones_like(mus))
    samples = deterministic_distr.sample()

    mu_hat, _ = maximum_likelihood_estimate_sgd(
        LaplaceFixedVarianceOutput(),
        samples,
        init_biases=[3 * mu, 0.1],
        learning_rate=PositiveFloat(1e-3),
        hybridize=hybridize,
    )

    assert (np.abs(mu_hat - mu) <
            TOL * mu), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
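The "reduces to L1" claim in the docstring follows from the Laplace negative log-likelihood, sum_i |x_i - mu| / b + n * log(2b): with b held fixed, minimizing it over mu is exactly L1 minimization, whose solution is the sample median. A quick numerical check (plain NumPy, illustrative values):

import numpy as np

rng = np.random.default_rng(0)
x = rng.laplace(loc=1.5, scale=0.1, size=2_000)

# Brute-force the L1 loss over a grid of candidate mu values:
grid = np.linspace(1.0, 2.0, 1_001)
l1_loss = np.abs(x[None, :] - grid[:, None]).sum(axis=1)

print(grid[l1_loss.argmin()], np.median(x))  # both close to 1.5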
Example #3
 # Excerpt from a parametrized shape test: each case is
 # (distribution, expected_batch_shape, expected_event_shape).
 (
     MultivariateGaussian(
         mu=mx.nd.zeros(shape=(3, 4, 5)),
         L=make_nd_diag(F=mx.nd, x=mx.nd.ones(shape=(3, 4, 5)), d=5),
     ),
     (3, 4),
     (5,),
 ),
 (Dirichlet(alpha=mx.nd.ones(shape=(3, 4, 5))), (3, 4), (5, )),
 (
     DirichletMultinomial(
         dim=5, n_trials=9, alpha=mx.nd.ones(shape=(3, 4, 5))),
     (3, 4),
     (5, ),
 ),
 (
     Laplace(mu=mx.nd.zeros(shape=(3, 4, 5)),
             b=mx.nd.ones(shape=(3, 4, 5))),
     (3, 4, 5),
     (),
 ),
 (
     NegativeBinomial(
         mu=mx.nd.zeros(shape=(3, 4, 5)),
         alpha=mx.nd.ones(shape=(3, 4, 5)),
     ),
     (3, 4, 5),
     (),
 ),
 (
     Uniform(
         low=-mx.nd.ones(shape=(3, 4, 5)),
         high=mx.nd.ones(shape=(3, 4, 5)),
     ),
     # truncated in the source; shapes completed per the univariate pattern above
     (3, 4, 5),
     (),
 ),
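These tuples presumably parametrize a batch/event shape check. A self-contained sketch of that kind of assertion for the Laplace case, assuming the gluonts.mx.distribution import path:

import mxnet as mx
from gluonts.mx.distribution import Laplace

distr = Laplace(
    mu=mx.nd.zeros(shape=(3, 4, 5)), b=mx.nd.ones(shape=(3, 4, 5))
)
assert distr.batch_shape == (3, 4, 5)
assert distr.event_shape == ()
# sample() with no arguments draws one value per batch element:
assert distr.sample().shape == (3, 4, 5)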
Example #4
 # Excerpt: distributions instantiated over a common BATCH_SHAPE
 # (the (3, 4, 5, 10) PiecewiseLinear arguments suggest BATCH_SHAPE == (3, 4, 5)).
 Gamma(
     alpha=mx.nd.ones(shape=BATCH_SHAPE),
     beta=mx.nd.ones(shape=BATCH_SHAPE),
 ),
 Beta(
     alpha=0.5 * mx.nd.ones(shape=BATCH_SHAPE),
     beta=0.5 * mx.nd.ones(shape=BATCH_SHAPE),
 ),
 StudentT(
     mu=mx.nd.zeros(shape=BATCH_SHAPE),
     sigma=mx.nd.ones(shape=BATCH_SHAPE),
     nu=mx.nd.ones(shape=BATCH_SHAPE),
 ),
 Dirichlet(alpha=mx.nd.ones(shape=BATCH_SHAPE)),
 Laplace(
     mu=mx.nd.zeros(shape=BATCH_SHAPE), b=mx.nd.ones(shape=BATCH_SHAPE)
 ),
 NegativeBinomial(
     mu=mx.nd.zeros(shape=BATCH_SHAPE),
     alpha=mx.nd.ones(shape=BATCH_SHAPE),
 ),
 Poisson(rate=mx.nd.ones(shape=BATCH_SHAPE)),
 Uniform(
     low=-mx.nd.ones(shape=BATCH_SHAPE),
     high=mx.nd.ones(shape=BATCH_SHAPE),
 ),
 PiecewiseLinear(
     gamma=mx.nd.ones(shape=BATCH_SHAPE),
     slopes=mx.nd.ones(shape=(3, 4, 5, 10)),
     knot_spacings=mx.nd.ones(shape=(3, 4, 5, 10)) / 10,
 ),
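A hedged sketch of how such a battery of distributions is typically exercised: sample(num_samples) should prepend a sample axis to the batch shape, and log_prob should evaluate pointwise over the batch. BATCH_SHAPE = (3, 4, 5) is an assumption inferred from the PiecewiseLinear arguments above:

import mxnet as mx
from gluonts.mx.distribution import Laplace

BATCH_SHAPE = (3, 4, 5)  # assumed; inferred from the (3, 4, 5, 10) shapes above

distr = Laplace(
    mu=mx.nd.zeros(shape=BATCH_SHAPE), b=mx.nd.ones(shape=BATCH_SHAPE)
)
samples = distr.sample(num_samples=100)
assert samples.shape == (100,) + BATCH_SHAPE  # sample axis is prepended

log_probs = distr.log_prob(mx.nd.zeros(shape=BATCH_SHAPE))
assert log_probs.shape == BATCH_SHAPE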