Example 1
def test_laplace(mu_b: Tuple[float, float], hybridize: bool) -> None:
    '''
    Test to check that maximizing the likelihood recovers the parameters.
    '''
    # test instance
    mu, b = mu_b

    # generate samples
    mus = mx.nd.zeros((NUM_SAMPLES, )) + mu
    bs = mx.nd.zeros((NUM_SAMPLES, )) + b

    laplace_distr = Laplace(mu=mus, b=bs)
    samples = laplace_distr.sample()

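    # start the estimates a few tolerances away from the true (mu, b),
    # so the optimizer must actually recover them for the test to pass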
    init_biases = [
        mu - START_TOL_MULTIPLE * TOL * mu,
        inv_softplus(b + START_TOL_MULTIPLE * TOL * b),
    ]

    mu_hat, b_hat = maximum_likelihood_estimate_sgd(LaplaceOutput(),
                                                    samples,
                                                    hybridize=hybridize,
                                                    init_biases=init_biases)

    assert (np.abs(mu_hat - mu) <
            TOL * mu), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
    assert (np.abs(b_hat - b) <
            TOL * b), f"b did not match: b = {b}, b_hat = {b_hat}"
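Example 1 relies on module-level context that the listing omits: the constants NUM_SAMPLES, TOL, and START_TOL_MULTIPLE, plus the helpers inv_softplus and maximum_likelihood_estimate_sgd (Example 2 reuses the same names). Below is a minimal sketch of those two helpers, assuming a GluonTS-style DistributionOutput API (get_args_proj, distribution, log_prob); the loop structure and defaults are illustrative, not the library's actual implementation.

import mxnet as mx
import numpy as np
from mxnet import autograd, gluon

def inv_softplus(y):
    # inverse of softplus(x) = log(1 + exp(x)): maps a positive target
    # scale back to the raw value that softplus would turn into it
    return np.log(np.expm1(y))

def maximum_likelihood_estimate_sgd(distr_output, samples, init_biases=None,
                                    hybridize=True, learning_rate=1e-2,
                                    num_epochs=5):
    # hypothetical sketch: project a constant dummy feature onto the
    # distribution's parameters and fit them by SGD on the negative
    # log-likelihood of the observed samples
    # (bias initialization from init_biases is omitted for brevity)
    arg_proj = distr_output.get_args_proj()
    arg_proj.initialize()
    if hybridize:
        arg_proj.hybridize()
    trainer = gluon.Trainer(arg_proj.collect_params(), "sgd",
                            {"learning_rate": float(learning_rate)})
    dummy = mx.nd.ones((samples.shape[0], 1))
    for _ in range(num_epochs):
        with autograd.record():
            distr = distr_output.distribution(arg_proj(dummy))
            loss = -distr.log_prob(samples).mean()
        loss.backward()
        trainer.step(samples.shape[0])
    # return the fitted parameters, averaged over the batch dimension
    return [p.asnumpy().mean() for p in arg_proj(dummy)]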
Example 2
def test_deterministic_l1(mu: float, hybridize: bool) -> None:
    '''
    Test to check that maximizing the likelihood recovers the parameters.
    This test uses the Laplace distribution with a fixed scale, so maximizing
    the likelihood essentially reduces to minimizing a deterministic L1 loss.
    '''
    # generate samples
    mus = mx.nd.zeros(NUM_SAMPLES) + mu

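    # output type that pins the scale b to a constant 0.1 in domain_map,
    # so that only the location mu is learned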
    class LaplaceFixedVarianceOutput(LaplaceOutput):
        @classmethod
        def domain_map(cls, F, mu, b):
            b = 0.1 * F.ones_like(b)
            return mu.squeeze(axis=-1), b.squeeze(axis=-1)

    deterministic_distr = Laplace(mu=mus, b=0.1 * mx.nd.ones_like(mus))
    samples = deterministic_distr.sample()

    mu_hat, _ = maximum_likelihood_estimate_sgd(
        LaplaceFixedVarianceOutput(),
        samples,
        init_biases=[3 * mu, 0.1],
        learning_rate=PositiveFloat(1e-3),
        hybridize=hybridize,
    )

    assert (np.abs(mu_hat - mu) <
            TOL * mu), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
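The mu_b and hybridize arguments suggest the original tests are driven by pytest parametrization that these listings dropped. A hypothetical reconstruction for Example 1 follows; the parameter values are only illustrative.

import pytest

@pytest.mark.parametrize("mu_b", [(3.3, 0.7)])
@pytest.mark.parametrize("hybridize", [True, False])
def test_laplace(mu_b, hybridize):
    ...  # body as in Example 1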