Example #1
    def test_batched_rhos(self):
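        # A single AR with a (batch_size, ar_order) beta should give the same logp
        # as batch_size separate AR series that each use one row of beta.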
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = np.random.randn(batch_size, ar_order)
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            beta = Normal("beta",
                          0.0,
                          1.0,
                          shape=(batch_size, ar_order),
                          initval=beta_tp)
            AR("y", beta, sigma=1.0, shape=(batch_size, steps), initval=y_tp)
        with Model() as t1:
            beta = Normal("beta",
                          0.0,
                          1.0,
                          shape=(batch_size, ar_order),
                          initval=beta_tp)
            for i in range(batch_size):
                AR(f"y_{i}", beta[i], sigma=1.0, shape=steps, initval=y_tp[i])

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        beta_tp[1] = 0  # With zero AR coefficients, draws for series 1 should stay close to zero
        y_eval = t0["y"].eval({t0["beta"]: beta_tp})
        assert y_eval.shape == (batch_size, steps)
        assert np.all(abs(y_eval[1]) < 5)
Example #2
    def test_order1_logp(self):
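        # With a Flat init_dist the AR(1) logp reduces to a Normal regression of
        # y[t] on phi * y[t-1]; the second model repeats the check with a constant term.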
        data = np.array([0.3, 1, 2, 3, 4])
        phi = np.array([0.99])
        with Model() as t:
            y = AR("y", phi, sigma=1, init_dist=Flat.dist(), shape=len(data))
            z = Normal("z", mu=phi * data[:-1], sigma=1, shape=len(data) - 1)
        ar_like = t.compile_logp(y)({"y": data})
        reg_like = t.compile_logp(z)({"z": data[1:]})
        np.testing.assert_allclose(ar_like, reg_like)

        with Model() as t_constant:
            y = AR(
                "y",
                np.hstack((0.3, phi)),
                sigma=1,
                init_dist=Flat.dist(),
                shape=len(data),
                constant=True,
            )
            z = Normal("z",
                       mu=0.3 + phi * data[:-1],
                       sigma=1,
                       shape=len(data) - 1)
        ar_like = t_constant.compile_logp(y)({"y": data})
        reg_like = t_constant.compile_logp(z)({"z": data[1:]})
        np.testing.assert_allclose(ar_like, reg_like)
Example #3
    def test_batched_init_dist(self):
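        # Compare an AR whose initial values come from a batched init_dist against
        # batch_size separate ARs, then check that new draws stay close to the
        # per-series initial values.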
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = aesara.shared(np.random.randn(ar_order), shape=(3, ))
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            init_dist = Normal.dist(0.0, 0.01, size=(batch_size, ar_order))
            AR("y",
               beta_tp,
               sigma=0.01,
               init_dist=init_dist,
               steps=steps,
               initval=y_tp)
        with Model() as t1:
            for i in range(batch_size):
                AR(f"y_{i}", beta_tp, sigma=0.01, shape=steps, initval=y_tp[i])

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        # Subsequent values should stay close to the previous ones
        beta_tp.set_value(np.full((ar_order, ), 1 / ar_order))
        # Init dist is cloned when creating the AR, so the original variable is not
        # part of the AR graph. We retrieve the one actually used manually
        init_dist = t0["y"].owner.inputs[2]
        init_dist_tp = np.full((batch_size, ar_order),
                               (np.arange(batch_size) * 100)[:, None])
        y_eval = t0["y"].eval({init_dist: init_dist_tp})
        assert y_eval.shape == (batch_size, steps + ar_order)
        assert np.allclose(y_eval[:, -10:].mean(-1),
                           np.arange(batch_size) * 100,
                           rtol=0.1,
                           atol=0.5)
Example #4
    def test_batched_size(self, constant):
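        # A single AR of shape (batch_size, steps) should match batch_size independent
        # ARs built from rows of the same coefficients, and ar_order should be
        # inferred correctly from the coefficient length.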
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = np.random.randn(batch_size, ar_order + int(constant))
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            y = AR("y",
                   beta_tp,
                   shape=(batch_size, steps),
                   initval=y_tp,
                   constant=constant)
        with Model() as t1:
            for i in range(batch_size):
                AR(f"y_{i}",
                   beta_tp[i],
                   sigma=1.0,
                   shape=steps,
                   initval=y_tp[i],
                   constant=constant)

        assert y.owner.op.ar_order == ar_order

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        y_eval = draw(y, draws=2)
        assert y_eval[0].shape == (batch_size, steps)
        assert not np.any(np.isclose(y_eval[0], y_eval[1]))
Example #5
def test_double_counting():
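    # Gamma(1, 1) is Exponential(1): at x = exp(0) = 1 each entry contributes a logp
    # of -1 (the log-Jacobian of the log transform at 0 is 0), so four entries give -4.
    # The model with one imputed value must not count that value twice.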
    with Model(check_bounds=False) as m1:
        x = Gamma("x", 1, 1, size=4)

    logp_val = m1.compile_logp()({"x_log__": np.array([0, 0, 0, 0])})
    assert logp_val == -4.0

    with Model(check_bounds=False) as m2:
        x = Gamma("x", 1, 1, observed=[1, 1, 1, np.nan])

    logp_val = m2.compile_logp()({"x_missing_log__": np.array([0])})
    assert logp_val == -4.0
Example #6
def test_missing_logp():
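    # The logp of a model with imputed missing values, evaluated at the values that
    # were "missing", should equal the logp of the fully observed model.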
    with Model() as m:
        theta1 = Normal("theta1", 0, 5, observed=[0, 1, 2, 3, 4])
        theta2 = Normal("theta2", mu=theta1, observed=[0, 1, 2, 3, 4])
    m_logp = m.logp()

    with Model() as m_missing:
        theta1 = Normal("theta1", 0, 5, observed=np.array([0, 1, np.nan, 3, np.nan]))
        theta2 = Normal("theta2", mu=theta1, observed=np.array([np.nan, np.nan, 2, np.nan, 4]))
    m_missing_logp = m_missing.logp({"theta1_missing": [2, 4], "theta2_missing": [0, 1, 3]})

    assert m_logp == m_missing_logp
Example #7
    def test_batched_sigma(self):
        ar_order, steps, batch_size = 4, 100, (7, 5)
        # AR order cannot be inferred from beta_tp because it is not fixed.
        # We specify it manually below
        beta_tp = aesara.shared(np.random.randn(ar_order))
        sigma_tp = np.abs(np.random.randn(*batch_size))
        y_tp = np.random.randn(*batch_size, steps)
        with Model() as t0:
            sigma = HalfNormal("sigma",
                               1.0,
                               shape=batch_size,
                               initval=sigma_tp)
            AR(
                "y",
                beta_tp,
                sigma=sigma,
                init_dist=Normal.dist(0, sigma[..., None]),
                size=batch_size,
                steps=steps,
                initval=y_tp,
                ar_order=ar_order,
            )
        with Model() as t1:
            sigma = HalfNormal("sigma", 1.0, shape=batch_size, initval=sigma_tp)
            for i in range(batch_size[0]):
                for j in range(batch_size[1]):
                    AR(
                        f"y_{i}{j}",
                        beta_tp,
                        sigma=sigma[i][j],
                        init_dist=Normal.dist(0, sigma[i][j]),
                        shape=steps,
                        initval=y_tp[i, j],
                        ar_order=ar_order,
                    )

        # Check logp shape
        sigma_logp, y_logp = t0.compile_logp(sum=False)(t0.initial_point())
        assert tuple(y_logp.shape) == batch_size

        np.testing.assert_allclose(
            sigma_logp.sum() + y_logp.sum(),
            t1.compile_logp()(t1.initial_point()),
        )

        beta_tp.set_value(np.zeros((ar_order,)))  # With zero AR coefficients, y is just noise with scale sigma
        sigma_tp = np.full(batch_size, [0.01, 0.1, 1, 10, 100])
        y_eval = t0["y"].eval({t0["sigma"]: sigma_tp})
        assert y_eval.shape == (*batch_size, steps + ar_order)
        assert np.allclose(y_eval.std(axis=(0, 2)), [0.01, 0.1, 1, 10, 100],
                           rtol=0.1)
Example #8
    def test_normal_mixture_nd(self, nd, ncomp):
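        # NormalMixture with an explicit comp_shape, an equivalent Mixture of
        # per-component Normals, and NormalMixture without comp_shape should all
        # give the same logps.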
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp,)
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(
            w=np.ones(ncomp) / ncomp, mu=test_mus, sigma=1 / np.sqrt(test_taus), size=10
        )

        with Model() as model0:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
            obs0 = NormalMixture(
                "obs", w=ws, mu=mus, tau=taus, comp_shape=comp_shape, observed=observed
            )

        with Model() as model1:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            comp_dist = [
                Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
            ]
            mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, observed=observed)

        with Model() as model2:
            # Test that results are correct without comp_shape being passed to the Mixture.
            # This used to fail in V3
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, observed=observed)

        testpoint = model0.compute_initial_point()
        testpoint["mus"] = test_mus
        testpoint["taus_log__"] = np.log(test_taus)
        for logp0, logp1, logp2 in zip(
            model0.compile_logp(vars=[mixture0, obs0], sum=False)(testpoint),
            model1.compile_logp(vars=[mixture1, obs1], sum=False)(testpoint),
            model2.compile_logp(vars=[mixture2, obs2], sum=False)(testpoint),
        ):
            assert_allclose(logp0, logp1)
            assert_allclose(logp0, logp2)
Example #9
    def test_vector_components(self):
        nd = 3
        npop = 4
        # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
        mus = at.constant(np.full((nd, npop), np.arange(npop)))

        with Model(rng_seeder=self.get_random_state()) as model:
            m = Mixture(
                "m",
                w=np.ones(npop) / npop,
                # An MvNormal with diagonal covariance sigma**2 * eye(nd) should be
                # equivalent to the vector of independent Normals used for latent_m
                comp_dists=[MvNormal.dist(mus[:, i], np.eye(nd) * 1e-5**2) for i in range(npop)],
            )
            z = Categorical("z", p=np.ones(npop) / npop)
            latent_m = Normal("latent_m", mu=mus[..., z], sigma=1e-5, shape=nd)

        size = 100
        m_val = draw(m, draws=size)
        latent_m_val = draw(latent_m, draws=size)
        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 comes from the same mixture
        # component
        assert np.all(np.diff(m_val) < 1e-3)
        assert np.all(np.diff(latent_m_val) < 1e-3)
        # TODO: The following statistical test appears to be more flaky than expected
        #  even though the  distributions should be the same. Seeding should make it
        #  stable but might be worth investigating further
        self.samples_from_same_distribution(m_val, latent_m_val)

        # Check that mixing of values in the last axis leads to smaller logp
        logp_fn = model.compile_logp(vars=[m])
        assert logp_fn({"m": [0, 0, 0]}) > logp_fn({"m": [0, 1, 0]}) > logp_fn({"m": [0, 1, 2]})
        self.logp_matches(m, latent_m, z, npop, model=model)
Example #10
def test_ignore_logprob_model():
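    # joint_logp should fail when a plain RV is used as a DensityDist input, but
    # succeed once the input is wrapped with ignore_logprob.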
    # logp that does not depend on input
    def logp(value, x):
        return value

    with Model() as m:
        x = Normal.dist()
        y = DensityDist("y", x, logp=logp)
    # Aeppl raises a KeyError when it finds an unexpected RV
    with pytest.raises(KeyError):
        joint_logp([y], {y: y.type()})

    with Model() as m:
        x = ignore_logprob(Normal.dist())
        y = DensityDist("y", x, logp=logp)
    assert joint_logp([y], {y: y.type()})
Example #11
def test_AR_nd():
    # AR(3), multidimensional (p lags, n parallel series)
    p, T, n = 3, 100, 5
    beta_tp = np.random.randn(p, n)
    y_tp = np.random.randn(T, n)
    with Model() as t0:
        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
        AR("y", beta, sigma=1.0, shape=(T, n), initval=y_tp)

    with Model() as t1:
        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
        for i in range(n):
            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, initval=y_tp[:, i])

    np.testing.assert_allclose(t0.logp(t0.recompute_initial_point()),
                               t1.logp(t1.recompute_initial_point()))
Example #12
    def test_with_multinomial(self, batch_shape):
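        # Mixture of batched Multinomial components: check prior/draw shapes and
        # that the model logp matches a manual logsumexp over components.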
        p = np.random.uniform(size=(*batch_shape, self.mixture_comps, 3))
        n = 100 * np.ones((*batch_shape, 1))
        w = np.ones(self.mixture_comps) / self.mixture_comps
        mixture_axis = len(batch_shape)
        with Model() as model:
            comp_dists = Multinomial.dist(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))
            mixture = Mixture(
                "mixture",
                w=w,
                comp_dists=comp_dists,
                shape=(*batch_shape, 3),
            )
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mixture"].shape == (self.n_samples, *batch_shape, 3)
        assert draw(mixture, draws=self.size).shape == (self.size, *batch_shape, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        initial_point = model.compute_initial_point()
        comp_logp = logp(comp_dists, initial_point["mixture"].reshape(*batch_shape, 1, 3))
        log_sum_exp = logsumexp(
            comp_logp.eval() + np.log(w), axis=mixture_axis, keepdims=True
        ).sum()
        assert_allclose(
            model.compile_logp()(initial_point),
            log_sum_exp,
            rtol,
        )
Example #13
    def test_with_mvnormal(self):
        # 10 batch, 3-variate Gaussian
        mu = np.random.randn(self.mixture_comps, 3)
        mat = np.random.randn(3, 3)
        cov = mat @ mat.T
        chol = np.linalg.cholesky(cov)
        w = np.ones(self.mixture_comps) / self.mixture_comps

        with Model() as model:
            comp_dists = MvNormal.dist(mu=mu, chol=chol, shape=(self.mixture_comps, 3))
            mixture = Mixture("mixture", w=w, comp_dists=comp_dists, shape=(3,))
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mixture"].shape == (self.n_samples, 3)
        assert draw(mixture, draws=self.size).shape == (self.size, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        initial_point = model.compute_initial_point()
        comp_logp = logp(comp_dists, initial_point["mixture"].reshape(1, 3))
        log_sum_exp = logsumexp(comp_logp.eval() + np.log(w), axis=0, keepdims=True).sum()
        assert_allclose(
            model.compile_logp()(initial_point),
            log_sum_exp,
            rtol,
        )
Example #14
def test_logpt_basic():
    """Make sure we can compute a log-likelihood for a hierarchical model with transforms."""

    with Model() as m:
        a = Uniform("a", 0.0, 1.0)
        c = Normal("c")
        b_l = c * a + 2.0
        b = Uniform("b", b_l, b_l + 1.0)

    a_value_var = m.rvs_to_values[a]
    assert a_value_var.tag.transform

    b_value_var = m.rvs_to_values[b]
    assert b_value_var.tag.transform

    c_value_var = m.rvs_to_values[c]

    b_logp = logpt(b, b_value_var, sum=False)

    res_ancestors = list(walk_model((b_logp,), walk_past_rvs=True))
    res_rv_ancestors = [
        v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable)
    ]

    # There shouldn't be any `RandomVariable`s in the resulting graph
    assert len(res_rv_ancestors) == 0
    assert b_value_var in res_ancestors
    assert c_value_var in res_ancestors
    assert a_value_var in res_ancestors
Example #15
def test_simulator_moment(mu, sigma, size):
    def normal_sim(rng, mu, sigma, size):
        return rng.normal(mu, sigma, size=size)

    with Model() as model:
        x = Simulator("x", normal_sim, mu, sigma, size=size)

    fn = make_initial_point_fn(
        model=model,
        return_transformed=False,
        default_strategy="moment",
    )

    random_draw = model["x"].eval()
    result = fn(0)["x"]
    assert result.shape == random_draw.shape

    # We perform a z-test between the moment and expected mean from a sample of 10 draws
    # This test fails if the number of samples averaged in get_moment(Simulator)
    # is much smaller than 10, but would not catch the case where the number of samples
    # is higher than the expected 10

    n = 10  # samples
    expected_sample_mean = mu
    expected_sample_mean_std = np.sqrt(sigma**2 / n)

    # Multiple test adjustment for z-test to maintain alpha=0.01
    alpha = 0.01
    alpha /= 2 * 2 * 3  # Correct for number of test permutations
    alpha /= random_draw.size  # Correct for distribution size
    cutoff = st.norm().ppf(1 - (alpha / 2))

    assert np.all(
        np.abs((result - expected_sample_mean) /
               expected_sample_mean_std) < cutoff)
Example #16
    def test_scalar_components(self):
        nd = 3
        npop = 4
        # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
        mus = at.constant(np.full((nd, npop), np.arange(npop)))

        with Model(rng_seeder=self.get_random_state()) as model:
            m = NormalMixture(
                "m",
                w=np.ones(npop) / npop,
                mu=mus,
                sigma=1e-5,
                comp_shape=(nd, npop),
                shape=nd,
            )
            z = Categorical("z", p=np.ones(npop) / npop, shape=nd)
            mu = at.as_tensor_variable([mus[i, z[i]] for i in range(nd)])
            latent_m = Normal("latent_m", mu=mu, sigma=1e-5, shape=nd)

        size = 100
        m_val = draw(m, draws=size)
        latent_m_val = draw(latent_m, draws=size)

        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 can come from independent
        # components
        assert not all(np.all(np.diff(m_val) < 1e-3, axis=-1))
        assert not all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))
        self.samples_from_same_distribution(m_val, latent_m_val)

        # Check that logp is the same whether elements of the last axis are mixed or not
        logp_fn = model.compile_logp(vars=[m])
        assert np.isclose(logp_fn({"m": [0, 0, 0]}), logp_fn({"m": [0, 1, 2]}))
        self.logp_matches(m, latent_m, z, npop, model=model)
Example #17
def test_density_dist_default_moment_univariate(get_moment, size, expected):
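    # The DensityDist moment should match the expected value, whether the default
    # moment is used or a custom get_moment callable is supplied.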
    if get_moment == "custom_moment":
        get_moment = lambda rv, size, *rv_inputs: 5 * at.ones(size,
                                                              dtype=rv.dtype)
    with Model() as model:
        DensityDist("x", get_moment=get_moment, size=size)
    assert_moment_is_expected(model, expected)
Example #18
    def test_list_normals_sampling(self):
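        # Fit a two-component normal mixture with Metropolis and check that the
        # posterior means of w and mu recover the values used to generate the data.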
        norm_w = np.array([0.75, 0.25])
        norm_mu = np.array([0.0, 5.0])
        norm_sigma = np.ones_like(norm_mu)
        norm_x = generate_normal_mixture_data(norm_w, norm_mu, norm_sigma, size=1000)

        with Model() as model:
            w = Dirichlet("w", floatX(np.ones_like(norm_w)), shape=norm_w.size)
            mu = Normal("mu", 0.0, 10.0, shape=norm_w.size)
            tau = Gamma("tau", 1.0, 1.0, shape=norm_w.size)
            Mixture(
                "x_obs",
                w,
                [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
                observed=norm_x,
            )
            trace = sample(
                5000,
                chains=1,
                step=Metropolis(),
                random_seed=self.random_seed,
                progressbar=False,
                return_inferencedata=False,
            )

        assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(norm_w), rtol=0.1, atol=0.1)
        assert_allclose(np.sort(trace["mu"].mean(axis=0)), np.sort(norm_mu), rtol=0.1, atol=0.1)
Example #19
    def test_list_mvnormals_logp(self):
        mu1 = np.asarray([0.0, 1.0])
        cov1 = np.diag([1.5, 2.5])
        mu2 = np.asarray([1.0, 0.0])
        cov2 = np.diag([2.5, 3.5])
        obs = np.asarray([[0.5, 0.5], mu1, mu2])
        with Model() as model:
            w = Dirichlet("w", floatX(np.ones(2)), transform=None, shape=(2,))
            mvncomp1 = MvNormal.dist(mu=mu1, cov=cov1)
            mvncomp2 = MvNormal.dist(mu=mu2, cov=cov2)
            y = Mixture("x_obs", w, [mvncomp1, mvncomp2], observed=obs)

        # check logp of each component
        complogp_st = np.vstack(
            (
                st.multivariate_normal.logpdf(obs, mu1, cov1),
                st.multivariate_normal.logpdf(obs, mu2, cov2),
            )
        ).T

        # check logp of mixture
        testpoint = model.compute_initial_point()
        mixlogp_st = logsumexp(np.log(testpoint["w"]) + complogp_st, axis=-1, keepdims=False)
        assert_allclose(model.compile_logp(y, sum=False)(testpoint)[0], mixlogp_st)

        # check logp of model
        priorlogp = st.dirichlet.logpdf(
            x=testpoint["w"],
            alpha=np.ones(2),
        )
        assert_allclose(model.compile_logp()(testpoint), mixlogp_st.sum() + priorlogp)
Example #20
def test_unexpected_rvs():
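    # A free RV leaking into a DensityDist logp graph should make model.logp()
    # raise a ValueError.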
    with Model() as model:
        x = Normal("x")
        y = DensityDist("y", logp=lambda *args: x)

    with pytest.raises(ValueError,
                       match="^Random variables detected in the logp graph"):
        model.logp()
Example #21
    def test_broadcasting_in_shape(self):
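        # A two-component Poisson comp_dist should broadcast up to the requested
        # mixture shape of (1000,) in prior predictive draws.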
        with Model() as model:
            mu = Gamma("mu", 1.0, 1.0, shape=2)
            comp_dists = Poisson.dist(mu, shape=2)
            mix = Mixture("mix", w=np.ones(2) / 2, comp_dists=comp_dists, shape=(1000,))
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mix"].shape == (self.n_samples, 1000)
Example #22
    def test_component_choice_random(self):
        """Test that mixture choices change over evaluations"""
        with Model() as m:
            weights = [0.5, 0.5]
            components = [Normal.dist(-10, 0.01), Normal.dist(10, 0.01)]
            mix = Mixture.dist(weights, components)
        draws = draw(mix, draws=10)
        # Probability of coming from same component 10 times is 0.5**10
        assert np.unique(draws > 0).size == 2
Example #23
    def test_moment(self, mu, sigma, init, steps, size, expected):
        with Model() as model:
            GaussianRandomWalk(
                "x",
                mu=mu,
                sigma=sigma,
                init=init,
                steps=steps,
                size=size,
            )
        assert_moment_is_expected(model, expected)
Example #24
def test_zero_inflated_negative_binomial_moment(psi, mu, alpha, size,
                                                expected):
    with Model() as model:
        ZeroInflatedNegativeBinomial("x",
                                     psi=psi,
                                     mu=mu,
                                     alpha=alpha,
                                     size=size)
    assert_moment_is_expected(model, expected)
Example #25
def test_truncatednormal_moment(mu, sigma, lower, upper, size, expected):
    with Model() as model:
        TruncatedNormal("x",
                        mu=mu,
                        sigma=sigma,
                        lower=lower,
                        upper=upper,
                        size=size)
    assert_moment_is_expected(model, expected)
Example #26
    def test_float32_MLDA(self):
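        # MLDA with a coarse model should run end-to-end when data, initvals, and
        # variables are all float32.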
        data = np.random.randn(5).astype("float32")

        with Model() as coarse_model:
            x = Normal("x", initval=np.array(1.0, dtype="float32"))
            obs = Normal("obs", mu=x, sigma=1.0, observed=data + 0.5)

        with Model() as model:
            x = Normal("x", initval=np.array(1.0, dtype="float32"))
            obs = Normal("obs", mu=x, sigma=1.0, observed=data)

        assert x.dtype == "float32"
        assert obs.dtype == "float32"

        with model:
            sample(draws=10,
                   tune=10,
                   chains=1,
                   step=MLDA(coarse_models=[coarse_model]))
Example #27
def test_AR():
    # AR1
    data = np.array([0.3, 1, 2, 3, 4])
    phi = np.array([0.99])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z", mu=phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR1 and AR(1)
    with Model() as t:
        rho = Normal("rho", 0.0, 1.0)
        y1 = AR1("y1", rho, 1.0, observed=data)
        y2 = AR("y2", rho, 1.0, init=Normal.dist(0, 1), observed=data)
    initial_point = t.recompute_initial_point()
    np.testing.assert_allclose(y1.logp(initial_point), y2.logp(initial_point))

    # AR1 + constant
    with Model() as t:
        y = AR("y",
               np.hstack((0.3, phi)),
               sigma=1,
               shape=len(data),
               constant=True)
        z = Normal("z", mu=0.3 + phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR2
    phi = np.array([0.84, 0.10])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z",
                   mu=phi[0] * data[1:-1] + phi[1] * data[:-2],
                   sigma=1,
                   shape=len(data) - 2)
    ar_like = t["y"].logp({"z": data[2:], "y": data})
    reg_like = t["z"].logp({"z": data[2:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)
Example #28
def test_missing_multivariate():
    """Test model with missing variables whose transform changes base shape still works"""

    with Model() as m_miss:
        with pytest.raises(
            NotImplementedError,
            match="Automatic inputation is only supported for univariate RandomVariables",
        ):
            x = Dirichlet(
                "x", a=[1, 2, 3], observed=np.array([[0.3, 0.3, 0.4], [np.nan, np.nan, np.nan]])
            )
Example #29
def test_model_unchanged_logprob_access():
    # Issue #5007
    with Model() as model:
        a = Normal("a")
        c = Uniform("c", lower=a - 1, upper=1)

    original_inputs = set(aesara.graph.graph_inputs([c]))
    # Extract model.logpt
    model.logpt
    new_inputs = set(aesara.graph.graph_inputs([c]))
    assert original_inputs == new_inputs
Example #30
def test_interval_missing_observations():
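    # Imputation with masked arrays: missing entries become free variables
    # (interval-transformed for the Uniform), observed entries stay fixed, and the
    # combined deterministics must agree with both in prior and posterior draws.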
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)

        rng = aesara.shared(np.random.RandomState(2323), borrow=True)

        with pytest.warns(ImputationWarning):
            theta1 = Uniform("theta1", 0, 5, observed=obs1, rng=rng)
        with pytest.warns(ImputationWarning):
            theta2 = Normal("theta2", mu=theta1, observed=obs2, rng=rng)

        assert "theta1_observed" in model.named_vars
        assert "theta1_missing_interval__" in model.named_vars
        assert not hasattr(
            model.rvs_to_values[model.named_vars["theta1_observed"]].tag, "transform"
        )

        prior_trace = sample_prior_predictive(return_inferencedata=False)

        # Make sure the combined observed + missing deterministics have the
        # same shape as the original observation vectors
        assert prior_trace["theta1"].shape[-1] == obs1.shape[0]
        assert prior_trace["theta2"].shape[-1] == obs2.shape[0]

        # Make sure that the observed values are newly generated samples
        assert np.all(np.var(prior_trace["theta1_observed"], 0) > 0.0)
        assert np.all(np.var(prior_trace["theta2_observed"], 0) > 0.0)

        # Make sure the missing and observed parts of the combined deterministics
        # match the corresponding sampled variable values
        assert np.mean(prior_trace["theta1"][:, obs1.mask] - prior_trace["theta1_missing"]) == 0.0
        assert np.mean(prior_trace["theta1"][:, ~obs1.mask] - prior_trace["theta1_observed"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, obs2.mask] - prior_trace["theta2_missing"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, ~obs2.mask] - prior_trace["theta2_observed"]) == 0.0

        assert {"theta1", "theta2"} <= set(prior_trace.keys())

        trace = sample(
            chains=1, draws=50, compute_convergence_checks=False, return_inferencedata=False
        )

        assert np.all(0 < trace["theta1_missing"].mean(0))
        assert np.all(0 < trace["theta2_missing"].mean(0))
        assert "theta1" not in trace.varnames
        assert "theta2" not in trace.varnames

        # Make sure that the observed values are newly generated samples and that
        # the observed parts match the combined deterministics
        pp_trace = sample_posterior_predictive(trace, return_inferencedata=False, keep_size=False)
        assert np.all(np.var(pp_trace["theta1"], 0) > 0.0)
        assert np.all(np.var(pp_trace["theta2"], 0) > 0.0)
        assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0
        assert np.mean(pp_trace["theta2"][:, ~obs2.mask] - pp_trace["theta2_observed"]) == 0.0