Example #1
    def test_normal_mixture_nd(self, nd, ncomp):
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp,)
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(
            w=np.ones(ncomp) / ncomp, mu=test_mus, sigma=1 / np.sqrt(test_taus), size=10
        )

        with Model() as model0:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
            obs0 = NormalMixture(
                "obs", w=ws, mu=mus, tau=taus, comp_shape=comp_shape, observed=observed
            )

        with Model() as model1:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            comp_dist = [
                Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
            ]
            mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, observed=observed)

        with Model() as model2:
            # Test that results are correct without comp_shape being passed to the Mixture.
            # This used to fail in V3
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, observed=observed)

        testpoint = model0.compute_initial_point()
        testpoint["mus"] = test_mus
        testpoint["taus_log__"] = np.log(test_taus)
        for logp0, logp1, logp2 in zip(
            model0.compile_logp(vars=[mixture0, obs0], sum=False)(testpoint),
            model1.compile_logp(vars=[mixture1, obs1], sum=False)(testpoint),
            model2.compile_logp(vars=[mixture2, obs2], sum=False)(testpoint),
        ):
            assert_allclose(logp0, logp1)
            assert_allclose(logp0, logp2)
Example #2
    def test_list_mvnormals_logp(self):
        mu1 = np.asarray([0.0, 1.0])
        cov1 = np.diag([1.5, 2.5])
        mu2 = np.asarray([1.0, 0.0])
        cov2 = np.diag([2.5, 3.5])
        obs = np.asarray([[0.5, 0.5], mu1, mu2])
        with Model() as model:
            w = Dirichlet("w", floatX(np.ones(2)), transform=None, shape=(2,))
            mvncomp1 = MvNormal.dist(mu=mu1, cov=cov1)
            mvncomp2 = MvNormal.dist(mu=mu2, cov=cov2)
            y = Mixture("x_obs", w, [mvncomp1, mvncomp2], observed=obs)

        # check logp of each component
        complogp_st = np.vstack(
            (
                st.multivariate_normal.logpdf(obs, mu1, cov1),
                st.multivariate_normal.logpdf(obs, mu2, cov2),
            )
        ).T

        # check logp of mixture
        testpoint = model.compute_initial_point()
        mixlogp_st = logsumexp(np.log(testpoint["w"]) + complogp_st, axis=-1, keepdims=False)
        assert_allclose(model.compile_logp(y, sum=False)(testpoint)[0], mixlogp_st)

        # check logp of model
        priorlogp = st.dirichlet.logpdf(
            x=testpoint["w"],
            alpha=np.ones(2),
        )
        assert_allclose(model.compile_logp()(testpoint), mixlogp_st.sum() + priorlogp)
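The reference values in this check come from the standard mixture identity: the log-density of a mixture is the logsumexp of the component log-densities shifted by the log-weights. A minimal, self-contained sketch of that identity (the weights and components below are illustrative, not taken from the test):

    import numpy as np
    from scipy import stats as st
    from scipy.special import logsumexp

    w = np.array([0.3, 0.7])  # mixture weights
    x = 0.5  # an arbitrary evaluation point
    comp_logp = np.array([st.norm.logpdf(x, 0.0, 1.0), st.norm.logpdf(x, 1.0, 2.0)])

    # log p(x) = logsumexp_k(log w_k + log p_k(x))
    mix_logp = logsumexp(np.log(w) + comp_logp)
    direct = np.log(w[0] * st.norm.pdf(x, 0.0, 1.0) + w[1] * st.norm.pdf(x, 1.0, 2.0))
    assert np.isclose(mix_logp, direct)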
Example #3
    def test_list_normals_sampling(self):
        norm_w = np.array([0.75, 0.25])
        norm_mu = np.array([0.0, 5.0])
        norm_sigma = np.ones_like(norm_mu)
        norm_x = generate_normal_mixture_data(norm_w, norm_mu, norm_sigma, size=1000)

        with Model() as model:
            w = Dirichlet("w", floatX(np.ones_like(norm_w)), shape=norm_w.size)
            mu = Normal("mu", 0.0, 10.0, shape=norm_w.size)
            tau = Gamma("tau", 1.0, 1.0, shape=norm_w.size)
            Mixture(
                "x_obs",
                w,
                [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
                observed=norm_x,
            )
            trace = sample(
                5000,
                chains=1,
                step=Metropolis(),
                random_seed=self.random_seed,
                progressbar=False,
                return_inferencedata=False,
            )

        assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(norm_w), rtol=0.1, atol=0.1)
        assert_allclose(np.sort(trace["mu"].mean(axis=0)), np.sort(norm_mu), rtol=0.1, atol=0.1)
Example #4
    def test_mixture_dtype(self):
        mix_dtype = Mixture.dist(
            w=[0.5, 0.5],
            comp_dists=[
                Multinomial.dist(n=5, p=[0.5, 0.5]),
                Multinomial.dist(n=5, p=[0.5, 0.5]),
            ],
        ).dtype
        assert mix_dtype == "int64"

        mix_dtype = Mixture.dist(
            w=[0.5, 0.5],
            comp_dists=[
                Dirichlet.dist(a=[0.5, 0.5]),
                Dirichlet.dist(a=[0.5, 0.5]),
            ],
        ).dtype
        assert mix_dtype == aesara.config.floatX
Example #5
    def test_change_size(self, comp_dists, expand):
        univariate = comp_dists[0].owner.op.ndim_supp == 0

        mix = Mixture.dist(w=Dirichlet.dist([1, 1]), comp_dists=comp_dists)
        mix = Mixture.change_size(mix, new_size=(4,), expand=expand)
        draws = mix.eval()
        expected_shape = (4,) if univariate else (4, 3)
        assert draws.shape == expected_shape
        assert np.unique(draws).size == draws.size

        mix = Mixture.dist(w=Dirichlet.dist([1, 1]), comp_dists=comp_dists, size=(3,))
        mix = Mixture.change_size(mix, new_size=(5, 4), expand=expand)
        draws = mix.eval()
        expected_shape = (5, 4) if univariate else (5, 4, 3)
        if expand:
            expected_shape = expected_shape + (3,)
        assert draws.shape == expected_shape
        assert np.unique(draws).size == draws.size
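The expand flag controls whether new_size replaces the existing batch size (expand=False) or is prepended to it (expand=True). A minimal sketch of the second case, reusing the calls exercised above (the concrete two-component mixture is an illustrative assumption):

    import numpy as np
    from pymc import Dirichlet, Mixture, Normal

    # two well-separated univariate components, batch size (3,)
    comps = Normal.dist(mu=[0.0, 100.0], sigma=1.0)
    mix = Mixture.dist(w=Dirichlet.dist([1, 1]), comp_dists=comps, size=(3,))

    # expand=True keeps the old size and prepends the new one: (5,) + (3,)
    resized = Mixture.change_size(mix, new_size=(5,), expand=True)
    assert resized.eval().shape == (5, 3)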
Example #6
def test_missing_multivariate():
    """Test model with missing variables whose transform changes base shape still works"""

    with Model() as m_miss:
        with pytest.raises(
            NotImplementedError,
            match="Automatic inputation is only supported for univariate RandomVariables",
        ):
            x = Dirichlet(
                "x", a=[1, 2, 3], observed=np.array([[0.3, 0.3, 0.4], [np.nan, np.nan, np.nan]])
            )
Example #7
    def test_components_expanded_by_weights(self, comp_dists):
        """Test that components are expanded when size or weights are larger than components"""
        univariate = comp_dists[0].owner.op.ndim_supp == 0

        mix = Mixture.dist(
            w=Dirichlet.dist([1, 1], shape=(3, 2)),
            comp_dists=comp_dists,
            size=(3,),
        )
        draws = mix.eval()
        # parenthesize the conditional: without it, the non-univariate branch
        # would assert the truthiness of the tuple (3, 3), which is always True
        assert draws.shape == ((3,) if univariate else (3, 3))
        assert np.unique(draws).size == draws.size

        mix = Mixture.dist(
            w=Dirichlet.dist([1, 1], shape=(4, 3, 2)),
            comp_dists=comp_dists,
            size=(3,),
        )
        draws = mix.eval()
        assert draws.shape == ((4, 3) if univariate else (4, 3, 3))
        assert np.unique(draws).size == draws.size
Example #8
    def test_single_poisson_predictive_sampling_shape(self):
        # test the shape broadcasting in mixture random
        rng = self.get_random_state()
        y = np.concatenate([rng.poisson(5, size=10), rng.poisson(9, size=10)])
        with Model() as model:
            comp0 = Poisson.dist(mu=np.ones(2))
            w0 = Dirichlet("w0", a=np.ones(2), shape=(2,))
            like0 = Mixture("like0", w=w0, comp_dists=comp0, observed=y)

            comp1 = Poisson.dist(mu=np.ones((20, 2)), shape=(20, 2))
            w1 = Dirichlet("w1", a=np.ones(2), shape=(2,))
            like1 = Mixture("like1", w=w1, comp_dists=comp1, observed=y)

            comp2 = Poisson.dist(mu=np.ones(2))
            w2 = Dirichlet("w2", a=np.ones(2), shape=(20, 2))
            like2 = Mixture("like2", w=w2, comp_dists=comp2, observed=y)

            comp3 = Poisson.dist(mu=np.ones(2), shape=(20, 2))
            w3 = Dirichlet("w3", a=np.ones(2), shape=(20, 2))
            like3 = Mixture("like3", w=w3, comp_dists=comp3, observed=y)

        n_samples = 30
        with model:
            prior = sample_prior_predictive(samples=n_samples, return_inferencedata=False)
            ppc = sample_posterior_predictive(
                [self.get_inital_point(model)], samples=n_samples, return_inferencedata=False
            )

        assert prior["like0"].shape == (n_samples, 20)
        assert prior["like1"].shape == (n_samples, 20)
        assert prior["like2"].shape == (n_samples, 20)
        assert prior["like3"].shape == (n_samples, 20)

        assert ppc["like0"].shape == (n_samples, 20)
        assert ppc["like1"].shape == (n_samples, 20)
        assert ppc["like2"].shape == (n_samples, 20)
        assert ppc["like3"].shape == (n_samples, 20)
Example #9
    def test_list_poissons_sampling(self):
        pois_w = np.array([0.4, 0.6])
        pois_mu = np.array([5.0, 20.0])
        pois_x = generate_poisson_mixture_data(pois_w, pois_mu, size=1000)

        with Model() as model:
            w = Dirichlet("w", floatX(np.ones_like(pois_w)), shape=pois_w.shape)
            mu = Gamma("mu", 1.0, 1.0, shape=pois_w.size)
            Mixture("x_obs", w, [Poisson.dist(mu[0]), Poisson.dist(mu[1])], observed=pois_x)
            trace = sample(
                5000,
                chains=1,
                step=Metropolis(),
                random_seed=self.random_seed,
                progressbar=False,
                return_inferencedata=False,
            )

        assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(pois_w), rtol=0.1, atol=0.1)
        assert_allclose(np.sort(trace["mu"].mean(axis=0)), np.sort(pois_mu), rtol=0.1, atol=0.1)
Example #10
    def test_list_mvnormals_predictive_sampling_shape(self):
        N = 100  # number of data points
        K = 3  # number of mixture components
        D = 3  # dimensionality of the data
        X = MvNormal.dist(np.zeros(D), np.eye(D), size=N).eval()

        with Model() as model:
            pi = Dirichlet("pi", np.ones(K), shape=(K,))

            comp_dist = []
            mu = []
            packed_chol = []
            chol = []
            for i in range(K):
                mu.append(Normal(f"mu{i}", 0, 10, shape=D))
                packed_chol.append(
                    LKJCholeskyCov(
                        f"chol_cov_{i}",
                        eta=2,
                        n=D,
                        sd_dist=HalfNormal.dist(2.5, size=D),
                        compute_corr=False,
                    )
                )
                chol.append(expand_packed_triangular(D, packed_chol[i], lower=True))
                comp_dist.append(MvNormal.dist(mu=mu[i], chol=chol[i], shape=D))

            Mixture("x_obs", pi, comp_dist, observed=X)

        n_samples = 20
        with model:
            prior = sample_prior_predictive(samples=n_samples, return_inferencedata=False)
            ppc = sample_posterior_predictive(
                [self.get_inital_point(model)], samples=n_samples, return_inferencedata=False
            )
        assert ppc["x_obs"].shape == (n_samples,) + X.shape
        assert prior["x_obs"].shape == (n_samples,) + X.shape
        assert prior["mu0"].shape == (n_samples, D)
        assert prior["chol_cov_0"].shape == (n_samples, D * (D + 1) // 2)
Example #11
    def test_nested_mixture(self):
        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7
        nbr = 4
        with Model() as model:
            # mixtures components
            g_comp = Normal.dist(
                mu=Exponential("mu_g", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
            )
            l_comp = LogNormal.dist(
                mu=Exponential("mu_l", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
            )
            # weight vector for the mixtures
            g_w = Dirichlet("g_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
            l_w = Dirichlet("l_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
            # mixture components
            g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
            l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
            # mixture of mixtures
            mix_w = Dirichlet("mix_w", a=floatX(np.ones(2)), transform=None, shape=(2,))
            mix = Mixture("mix", w=mix_w, comp_dists=[g_mix, l_mix], observed=np.exp(self.norm_x))

        test_point = model.compute_initial_point()

        def mixmixlogp(value, point):
            floatX = aesara.config.floatX
            priorlogp = (
                st.dirichlet.logpdf(
                    x=point["g_w"],
                    alpha=np.ones(nbr) * 0.0000001,
                ).astype(floatX)
                + st.expon.logpdf(x=point["mu_g"]).sum(dtype=floatX)
                + st.dirichlet.logpdf(
                    x=point["l_w"],
                    alpha=np.ones(nbr) * 0.0000001,
                ).astype(floatX)
                + st.expon.logpdf(x=point["mu_l"]).sum(dtype=floatX)
                + st.dirichlet.logpdf(
                    x=point["mix_w"],
                    alpha=np.ones(2),
                ).astype(floatX)
            )
            complogp1 = st.norm.logpdf(x=value, loc=point["mu_g"]).astype(floatX)
            mixlogp1 = logsumexp(
                np.log(point["g_w"]).astype(floatX) + complogp1, axis=-1, keepdims=True
            )
            complogp2 = st.lognorm.logpdf(value, 1.0, 0.0, np.exp(point["mu_l"])).astype(floatX)
            mixlogp2 = logsumexp(
                np.log(point["l_w"]).astype(floatX) + complogp2, axis=-1, keepdims=True
            )
            complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
            mixmixlogpg = logsumexp(
                np.log(point["mix_w"]).astype(floatX) + complogp_mix, axis=-1, keepdims=False
            )
            return priorlogp, mixmixlogpg

        value = np.exp(self.norm_x)[:, None]
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)

        # check logp of mixture; compile the logp functions once so they can be
        # reused after the test point is modified below
        mix_logp_fn = model.compile_logp(vars=[mix], sum=False)
        assert_allclose(mixmixlogpg, mix_logp_fn(test_point)[0], rtol=rtol)

        # check model logp
        model_logp_fn = model.compile_logp()
        assert_allclose(priorlogp + mixmixlogpg.sum(), model_logp_fn(test_point), rtol=rtol)

        # change the test point and check the logps again
        test_point["g_w"] = np.asarray([0.1, 0.1, 0.2, 0.6])
        test_point["mu_g"] = np.exp(np.random.randn(nbr))
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
        assert_allclose(mixmixlogpg, mix_logp_fn(test_point)[0], rtol=rtol)
        assert_allclose(priorlogp + mixmixlogpg.sum(), model_logp_fn(test_point), rtol=rtol)
Example #12
class TestMixtureMoments:
    @pytest.mark.parametrize(
        "weights, comp_dists, size, expected",
        [
            (
                np.array([0.4, 0.6]),
                Normal.dist(mu=np.array([-2, 6]), sigma=np.array([5, 3])),
                None,
                2.8,
            ),
            (
                np.tile(1 / 13, 13),
                Normal.dist(-2, 1, size=(13,)),
                (3,),
                np.full((3,), -2),
            ),
            (
                np.array([0.4, 0.6]),
                Normal.dist([-2, 6], 3),
                (5, 3),
                np.full((5, 3), 2.8),
            ),
            (
                np.broadcast_to(np.array([0.4, 0.6]), (5, 3, 2)),
                Normal.dist(np.array([-2, 6]), np.array([5, 3])),
                None,
                np.full(shape=(5, 3), fill_value=2.8),
            ),
            (
                np.array([0.4, 0.6]),
                Normal.dist(np.array([-2, 6]), np.array([5, 3]), size=(5, 3, 2)),
                None,
                np.full(shape=(5, 3), fill_value=2.8),
            ),
            (
                np.array([[0.8, 0.2], [0.2, 0.8]]),
                Normal.dist(np.array([-2, 6])),
                None,
                np.array([-0.4, 4.4]),
            ),
            # implied size = (11, 7) will be overwritten by (5, 3)
            (
                np.array([0.4, 0.6]),
                Normal.dist(np.array([-2, 6]), np.array([5, 3]), size=(11, 7, 2)),
                (5, 3),
                np.full(shape=(5, 3), fill_value=2.8),
            ),
        ],
    )
    def test_single_univariate_component(self, weights, comp_dists, size, expected):
        with Model() as model:
            Mixture("x", weights, comp_dists, size=size)
        assert_moment_is_expected(model, expected, check_finite_logp=False)

    @pytest.mark.parametrize(
        "weights, comp_dists, size, expected",
        [
            (
                np.array([1, 0]),
                [Normal.dist(-2, 5), Normal.dist(6, 3)],
                None,
                -2,
            ),
            (
                np.array([0.4, 0.6]),
                [Normal.dist(-2, 5, size=(2,)), Normal.dist(6, 3, size=(2,))],
                None,
                np.full((2,), 2.8),
            ),
            (
                np.array([0.5, 0.5]),
                [Normal.dist(-2, 5), Exponential.dist(lam=1 / 3)],
                (3, 5),
                np.full((3, 5), 0.5),
            ),
            (
                np.broadcast_to(np.array([0.4, 0.6]), (5, 3, 2)),
                [Normal.dist(-2, 5), Normal.dist(6, 3)],
                None,
                np.full(shape=(5, 3), fill_value=2.8),
            ),
            (
                np.array([[0.8, 0.2], [0.2, 0.8]]),
                [Normal.dist(-2, 5), Normal.dist(6, 3)],
                None,
                np.array([-0.4, 4.4]),
            ),
            (
                np.array([[0.8, 0.2], [0.2, 0.8]]),
                [Normal.dist(-2, 5), Normal.dist(6, 3)],
                (3, 2),
                np.full((3, 2), np.array([-0.4, 4.4])),
            ),
            (
                # implied size = (11, 7) will be overwritten by (5, 3)
                np.array([0.4, 0.6]),
                [Normal.dist(-2, 5, size=(11, 7)), Normal.dist(6, 3, size=(11, 7))],
                (5, 3),
                np.full(shape=(5, 3), fill_value=2.8),
            ),
        ],
    )
    def test_list_univariate_components(self, weights, comp_dists, size, expected):
        with Model() as model:
            Mixture("x", weights, comp_dists, size=size)
        assert_moment_is_expected(model, expected, check_finite_logp=False)

    @pytest.mark.parametrize(
        "weights, comp_dists, size, expected",
        [
            (
                np.array([0.4, 0.6]),
                MvNormal.dist(mu=np.array([[-1, -2], [3, 5]]), cov=np.eye(2) * 0.3),
                None,
                np.array([1.4, 2.2]),
            ),
            (
                np.array([0.5, 0.5]),
                Dirichlet.dist(a=np.array([[0.0001, 0.0001, 1000], [2, 4, 6]])),
                (4,),
                np.array(np.full((4, 3), [1 / 12, 1 / 6, 3 / 4])),
            ),
            (
                np.array([0.4, 0.6]),
                MvNormal.dist(mu=np.array([-10, 0, 10]), cov=np.eye(3) * 3, size=(4, 2)),
                None,
                np.full((4, 3), [-10, 0, 10]),
            ),
            (
                np.array([[1.0, 0], [0.0, 1.0]]),
                MvNormal.dist(
                    mu=np.array([[-5, -10, -15], [5, 10, 15]]), cov=np.eye(3) * 3, size=(2,)
                ),
                (3, 2),
                np.full((3, 2, 3), [[-5, -10, -15], [5, 10, 15]]),
            ),
        ],
    )
    def test_single_multivariate_component(self, weights, comp_dists, size, expected):
        with Model() as model:
            Mixture("x", weights, comp_dists, size=size)
        assert_moment_is_expected(model, expected, check_finite_logp=False)

    @pytest.mark.parametrize(
        "weights, comp_dists, size, expected",
        [
            (
                np.array([0.4, 0.6]),
                [
                    MvNormal.dist(mu=np.array([-1, -2]), cov=np.eye(2) * 0.3),
                    MvNormal.dist(mu=np.array([3, 5]), cov=np.eye(2) * 0.8),
                ],
                None,
                np.array([1.4, 2.2]),
            ),
            (
                np.array([0.4, 0.6]),
                [
                    Dirichlet.dist(a=np.array([2, 3, 5])),
                    MvNormal.dist(mu=np.array([-10, 0, 10]), cov=np.eye(3) * 3),
                ],
                (4,),
                np.array(np.full((4, 3), [-5.92, 0.12, 6.2])),
            ),
            (
                np.array([0.4, 0.6]),
                [
                    Dirichlet.dist(a=np.array([2, 3, 5]), size=(2,)),
                    MvNormal.dist(mu=np.array([-10, 0, 10]), cov=np.eye(3) * 3, size=(2,)),
                ],
                None,
                np.full((2, 3), [-5.92, 0.12, 6.2]),
            ),
            (
                np.array([[1.0, 0], [0.0, 1.0]]),
                [
                    MvNormal.dist(mu=np.array([-5, -10, -15]), cov=np.eye(3) * 3, size=(2,)),
                    MvNormal.dist(mu=np.array([5, 10, 15]), cov=np.eye(3) * 3, size=(2,)),
                ],
                (3, 2),
                np.full((3, 2, 3), [[-5, -10, -15], [5, 10, 15]]),
            ),
        ],
    )
    def test_list_multivariate_components(self, weights, comp_dists, size, expected):
        with Model() as model:
            Mixture("x", weights, comp_dists, size=size)
        assert_moment_is_expected(model, expected, check_finite_logp=False)
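The expected values in these tables follow from the moment of a mixture being the weights-weighted average of the component moments, broadcast to the requested size. A short sketch of the arithmetic behind the univariate cases above (plain numpy, not test code):

    import numpy as np

    w = np.array([0.4, 0.6])
    mu = np.array([-2.0, 6.0])  # component means; sigma does not affect the mean
    # E[x] = sum_k w_k * E[x_k] = 0.4 * (-2) + 0.6 * 6 = 2.8
    assert np.isclose((w * mu).sum(), 2.8)

    # row-wise weights mix the same components in different proportions
    W = np.array([[0.8, 0.2], [0.2, 0.8]])
    assert np.allclose(W @ mu, np.array([-0.4, 4.4]))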
Example #13
    @pytest.mark.parametrize(
        "comp_dists, expected_shape",
        [
            (
                [
                    Normal.dist([[0, 0, 0], [0, 0, 0]]),
                    Normal.dist([0, 0, 0]),
                    Normal.dist([0]),
                ],
                (2, 3),
            ),
            (
                [
                    Dirichlet.dist([[1, 1, 1], [1, 1, 1]]),
                    Dirichlet.dist([1, 1, 1]),
                ],
                (2, 3),
            ),
        ],
    )
    def test_broadcast_components(self, comp_dists, expected_shape):
        n_dists = len(comp_dists)
        mix = Mixture.dist(w=np.ones(n_dists) / n_dists, comp_dists=comp_dists)
        mix_eval = mix.eval()
        assert tuple(mix_eval.shape) == expected_shape
        assert np.unique(mix_eval).size == mix_eval.size
        for comp_dist in mix.owner.inputs[2:]:
            # Check that the input is a "pure" RandomVariable and not a broadcast
            # operation; this confirms that all draws will be unique.
            # (RandomVariable is aesara.tensor.random.op.RandomVariable.)
            assert isinstance(comp_dist.owner.op, RandomVariable)
Example #14
def test_dirichlet_moment(a, size, expected):
    with Model() as model:
        Dirichlet("x", a=a, size=size)
    assert_moment_is_expected(model, expected)