Example No. 1
    def test_single_multivariate_component_deterministic_weights(self, weights, component, size):
        # This test needs seeding to avoid repetitions
        rngs = [
            aesara.shared(np.random.default_rng(seed))
            for seed in self.get_random_state().randint(2**30, size=2)
        ]
        mix = Mixture.dist(weights, component, size=size, rngs=rngs)
        mix_eval = mix.eval()

        # Test shape
        # component shape is either (4, 2, 3) or (2, 3)
        # weights shape is either (4, 2) or (2,)
        if size is not None:
            expected_shape = size + (3,)
        elif component.ndim == 3 or weights.ndim == 2:
            expected_shape = (4, 3)
        else:
            expected_shape = (3,)
        assert mix_eval.shape == expected_shape

        # Test draws
        totals = mix_eval.sum(-1)
        expected_large_count = (weights == 1)[..., 1]
        assert np.all((totals == 10_000) == expected_large_count)
        repetitions = np.unique(mix_eval[..., 0]).size < totals.size
        assert not repetitions

        # Test logp
        mix_logp_eval = logp(mix, mix_eval).eval()
        expected_logp_shape = expected_shape[:-1]
        assert mix_logp_eval.shape == expected_logp_shape
        bcast_weights = np.broadcast_to(weights, (*expected_logp_shape, 2))
        expected_logp = logp(component, mix_eval[..., None, :]).eval()[bcast_weights == 1]
        expected_logp = expected_logp.reshape(expected_logp_shape)
        assert np.allclose(mix_logp_eval, expected_logp)
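Two things here are defined off-page: the (weights, component, size) arguments come from @pytest.mark.parametrize decorators that this listing does not reproduce, and get_random_state is a helper on the test class. Judging by the .randint(...) call, it presumably returns a seeded legacy np.random.RandomState (the newer np.random.Generator renamed that method to .integers()). A hypothetical sketch of the helper, assuming numpy is imported as np as in the tests:

    def get_random_state(self, seed=20220401):
        # Hypothetical reconstruction: a legacy RandomState is used because
        # the test calls .randint(); the seed value is illustrative only.
        return np.random.RandomState(seed)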
Example No. 2
    def test_broadcasting_in_shape(self):
        with Model() as model:
            mu = Gamma("mu", 1.0, 1.0, shape=2)
            comp_dists = Poisson.dist(mu, shape=2)
            mix = Mixture("mix", w=np.ones(2) / 2, comp_dists=comp_dists, shape=(1000,))
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mix"].shape == (self.n_samples, 1000)
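A standalone sketch of the same broadcasting pattern, assuming the public pymc namespace (the tests import the bare names): the two-component Poisson is resized by the Mixture's shape into 1000 i.i.d. draws, and with return_inferencedata=False the prior is a plain dict of numpy arrays, hence the prior["mix"] lookup above.

    import numpy as np
    import pymc as pm

    with pm.Model():
        mu = pm.Gamma("mu", 1.0, 1.0, shape=2)
        # shape=(1000,) broadcasts the 2-component Poisson into 1000 draws
        pm.Mixture("mix", w=np.ones(2) / 2,
                   comp_dists=pm.Poisson.dist(mu, shape=2), shape=(1000,))
        prior = pm.sample_prior_predictive(samples=100, return_inferencedata=False)

    print(prior["mix"].shape)  # (100, 1000)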
Example No. 3
    def test_mixture_dtype(self):
        mix_dtype = Mixture.dist(
            w=[0.5, 0.5],
            comp_dists=[
                Multinomial.dist(n=5, p=[0.5, 0.5]),
                Multinomial.dist(n=5, p=[0.5, 0.5]),
            ],
        ).dtype
        assert mix_dtype == "int64"

        mix_dtype = Mixture.dist(
            w=[0.5, 0.5],
            comp_dists=[
                Dirichlet.dist(a=[0.5, 0.5]),
                Dirichlet.dist(a=[0.5, 0.5]),
            ],
        ).dtype
        assert mix_dtype == aesara.config.floatX
Example No. 4
    def test_change_size(self, comp_dists, expand):
        univariate = comp_dists[0].owner.op.ndim_supp == 0

        mix = Mixture.dist(w=Dirichlet.dist([1, 1]), comp_dists=comp_dists)
        mix = Mixture.change_size(mix, new_size=(4,), expand=expand)
        draws = mix.eval()
        expected_shape = (4,) if univariate else (4, 3)
        assert draws.shape == expected_shape
        assert np.unique(draws).size == draws.size

        mix = Mixture.dist(w=Dirichlet.dist([1, 1]), comp_dists=comp_dists, size=(3,))
        mix = Mixture.change_size(mix, new_size=(5, 4), expand=expand)
        draws = mix.eval()
        expected_shape = (5, 4) if univariate else (5, 4, 3)
        if expand:
            expected_shape = expected_shape + (3,)
        assert draws.shape == expected_shape
        assert np.unique(draws).size == draws.size
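The resize semantics exercised above, restated as a usage sketch (assuming the same Mixture.change_size classmethod the test calls): expand=False replaces the existing batch size outright, while expand=True prepends the new dimensions to it, which is why the expected shape gains a trailing (3,) in the expand case.

    import pymc as pm

    mix = pm.Mixture.dist(w=[0.5, 0.5],
                          comp_dists=pm.Normal.dist(mu=[-10.0, 10.0]),
                          size=(3,))
    # expand=True keeps the old (3,) batch and prepends the new dims
    resized = pm.Mixture.change_size(mix, new_size=(5, 4), expand=True)
    print(resized.eval().shape)  # (5, 4, 3); expand=False would give (5, 4)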
Example No. 5
    def test_component_choice_random(self):
        """Test that mixture choices change over evaluations"""
        with Model() as m:
            weights = [0.5, 0.5]
            components = [Normal.dist(-10, 0.01), Normal.dist(10, 0.01)]
            mix = Mixture.dist(weights, components)
        draws = draw(mix, draws=10)
        # Probability that all 10 draws come from the same component is
        # 2 * 0.5**10, so both components should appear
        assert np.unique(draws > 0).size == 2
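The same check as a standalone sketch; draw (pm.draw in the public namespace) takes several independent draws, and with components this well separated the sign of each draw identifies its component. The chance that all ten draws share a component is 2 * 0.5**10, about 0.2%.

    import numpy as np
    import pymc as pm

    mix = pm.Mixture.dist(w=[0.5, 0.5],
                          comp_dists=[pm.Normal.dist(-10, 0.01),
                                      pm.Normal.dist(10, 0.01)])
    draws = pm.draw(mix, draws=10)
    print(np.unique(draws > 0))  # both False and True should appear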
Example No. 6
    def test_broadcast_components(self, comp_dists, expected_shape):
        n_dists = len(comp_dists)
        mix = Mixture.dist(w=np.ones(n_dists) / n_dists, comp_dists=comp_dists)
        mix_eval = mix.eval()
        assert tuple(mix_eval.shape) == expected_shape
        assert np.unique(mix_eval).size == mix_eval.size
        for comp_dist in mix.owner.inputs[2:]:
            # We check that the input is a "pure" RandomVariable and not a broadcast
            # operation. This confirms that all draws will be unique.
            assert isinstance(comp_dist.owner.op, RandomVariable)
            assert tuple(comp_dist.shape.eval()) == expected_shape
Example No. 7
    def test_components_expanded_by_weights(self, comp_dists):
        """Test that components are expanded when size or weights are larger than components"""
        univariate = comp_dists[0].owner.op.ndim_supp == 0

        mix = Mixture.dist(
            w=Dirichlet.dist([1, 1], shape=(3, 2)),
            comp_dists=comp_dists,
            size=(3,),
        )
        draws = mix.eval()
        assert draws.shape == ((3,) if univariate else (3, 3))
        assert np.unique(draws).size == draws.size

        mix = Mixture.dist(
            w=Dirichlet.dist([1, 1], shape=(4, 3, 2)),
            comp_dists=comp_dists,
            size=(3,),
        )
        draws = mix.eval()
        assert draws.shape == ((4, 3) if univariate else (4, 3, 3))
        assert np.unique(draws).size == draws.size
Example No. 8
    def test_iterable_single_component_warning(self):
        with pytest.warns(None) as record:
            Mixture.dist(w=[0.5, 0.5], comp_dists=Normal.dist(size=2))
            Mixture.dist(w=[0.5, 0.5], comp_dists=[Normal.dist(size=2), Normal.dist(size=2)])
        assert not record

        with pytest.warns(UserWarning, match="Single component will be treated as a mixture"):
            Mixture.dist(w=[0.5, 0.5], comp_dists=[Normal.dist(size=2)])
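The API distinction being tested, as a sketch: a bare distribution whose last axis has one entry per weight is the vector-component form and raises no warning, while wrapping a single distribution in a list declares a one-component mixture, which pymc flags.

    import pymc as pm

    # Vector form: one Normal.dist(size=2) supplies both components
    pm.Mixture.dist(w=[0.5, 0.5], comp_dists=pm.Normal.dist(size=2))

    # One-element list: emits the "Single component ..." UserWarning
    pm.Mixture.dist(w=[0.5, 0.5], comp_dists=[pm.Normal.dist(size=2)])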
Example No. 9
    def test_single_poisson_predictive_sampling_shape(self):
        # Test shape broadcasting in Mixture random sampling
        rng = self.get_random_state()
        y = np.concatenate([rng.poisson(5, size=10), rng.poisson(9, size=10)])
        with Model() as model:
            comp0 = Poisson.dist(mu=np.ones(2))
            w0 = Dirichlet("w0", a=np.ones(2), shape=(2,))
            like0 = Mixture("like0", w=w0, comp_dists=comp0, observed=y)

            comp1 = Poisson.dist(mu=np.ones((20, 2)), shape=(20, 2))
            w1 = Dirichlet("w1", a=np.ones(2), shape=(2,))
            like1 = Mixture("like1", w=w1, comp_dists=comp1, observed=y)

            comp2 = Poisson.dist(mu=np.ones(2))
            w2 = Dirichlet("w2", a=np.ones(2), shape=(20, 2))
            like2 = Mixture("like2", w=w2, comp_dists=comp2, observed=y)

            comp3 = Poisson.dist(mu=np.ones(2), shape=(20, 2))
            w3 = Dirichlet("w3", a=np.ones(2), shape=(20, 2))
            like3 = Mixture("like3", w=w3, comp_dists=comp3, observed=y)

        n_samples = 30
        with model:
            prior = sample_prior_predictive(samples=n_samples, return_inferencedata=False)
            ppc = sample_posterior_predictive(
                [self.get_inital_point(model)], samples=n_samples, return_inferencedata=False
            )

        assert prior["like0"].shape == (n_samples, 20)
        assert prior["like1"].shape == (n_samples, 20)
        assert prior["like2"].shape == (n_samples, 20)
        assert prior["like3"].shape == (n_samples, 20)

        assert ppc["like0"].shape == (n_samples, 20)
        assert ppc["like1"].shape == (n_samples, 20)
        assert ppc["like2"].shape == (n_samples, 20)
        assert ppc["like3"].shape == (n_samples, 20)
Example No. 10
    def test_list_univariate_components_deterministic_weights(self, weights, components, size):
        # Size can't be smaller than what is implied by replication dimensions
        if size is not None and len(size) < max(components[0].ndim, weights.ndim - 1):
            return

        mix = Mixture.dist(weights, components, size=size)
        mix_eval = mix.eval()

        # Test shape
        # components[0] shape is (4, 3), (3,) or ()
        # weights shape is either (3, 2) or (2,)
        if size is not None:
            expected_shape = size
        elif components[0].ndim == 2:
            expected_shape = (4, 3)
        elif components[0].ndim == 1 or weights.ndim == 2:
            expected_shape = (3,)
        else:
            expected_shape = ()
        assert mix_eval.shape == expected_shape

        # Test draws
        expected_positive = np.zeros_like(mix_eval)
        if expected_positive.ndim > 0:
            expected_positive[..., :] = (weights == 1)[..., 1]
        assert np.all((mix_eval > 0) == expected_positive)
        repetitions = np.unique(mix_eval).size < mix_eval.size
        assert not repetitions

        # Test logp
        mix_logp_eval = logp(mix, mix_eval).eval()
        assert mix_logp_eval.shape == expected_shape
        bcast_weights = np.broadcast_to(weights, (*expected_shape, 2))
        expected_logp = np.stack(
            (
                logp(components[0], mix_eval).eval(),
                logp(components[1], mix_eval).eval(),
            ),
            axis=-1,
        )[bcast_weights == 1]
        expected_logp = expected_logp.reshape(expected_shape)
        assert np.allclose(mix_logp_eval, expected_logp)
Example No. 11
    def test_list_poissons_sampling(self):
        pois_w = np.array([0.4, 0.6])
        pois_mu = np.array([5.0, 20.0])
        pois_x = generate_poisson_mixture_data(pois_w, pois_mu, size=1000)

        with Model() as model:
            w = Dirichlet("w", floatX(np.ones_like(pois_w)), shape=pois_w.shape)
            mu = Gamma("mu", 1.0, 1.0, shape=pois_w.size)
            Mixture("x_obs", w, [Poisson.dist(mu[0]), Poisson.dist(mu[1])], observed=pois_x)
            trace = sample(
                5000,
                chains=1,
                step=Metropolis(),
                random_seed=self.random_seed,
                progressbar=False,
                return_inferencedata=False,
            )

        assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(pois_w), rtol=0.1, atol=0.1)
        assert_allclose(np.sort(trace["mu"].mean(axis=0)), np.sort(pois_mu), rtol=0.1, atol=0.1)
Example No. 12
    def test_list_mvnormals_predictive_sampling_shape(self):
        N = 100  # number of data points
        K = 3  # number of mixture components
        D = 3  # dimensionality of the data
        X = MvNormal.dist(np.zeros(D), np.eye(D), size=N).eval()

        with Model() as model:
            pi = Dirichlet("pi", np.ones(K), shape=(K,))

            comp_dist = []
            mu = []
            packed_chol = []
            chol = []
            for i in range(K):
                mu.append(Normal(f"mu{i}", 0, 10, shape=D))
                packed_chol.append(
                    LKJCholeskyCov(
                        f"chol_cov_{i}",
                        eta=2,
                        n=D,
                        sd_dist=HalfNormal.dist(2.5, size=D),
                        compute_corr=False,
                    )
                )
                chol.append(expand_packed_triangular(D, packed_chol[i], lower=True))
                comp_dist.append(MvNormal.dist(mu=mu[i], chol=chol[i], shape=D))

            Mixture("x_obs", pi, comp_dist, observed=X)

        n_samples = 20
        with model:
            prior = sample_prior_predictive(samples=n_samples, return_inferencedata=False)
            ppc = sample_posterior_predictive(
                [self.get_inital_point(model)], samples=n_samples, return_inferencedata=False
            )
        assert ppc["x_obs"].shape == (n_samples,) + X.shape
        assert prior["x_obs"].shape == (n_samples,) + X.shape
        assert prior["mu0"].shape == (n_samples, D)
        assert prior["chol_cov_0"].shape == (n_samples, D * (D + 1) // 2)
Example No. 13
    def test_list_multivariate_components_deterministic_weights(self, weights, components, size):
        mix = Mixture.dist(weights, components, size=size)
        mix_eval = mix.eval()

        # Test shape
        # components[0] shape is either (4, 3) or (3,)
        # weights shape is either (4, 2) or (2,)
        if size is not None:
            expected_shape = size + (3,)
        elif components[0].ndim == 2 or weights.ndim == 2:
            expected_shape = (4, 3)
        else:
            expected_shape = (3,)
        assert mix_eval.shape == expected_shape

        # Test draws
        expected_positive = np.zeros_like(mix_eval)
        expected_positive[..., :] = (weights == 1)[..., 1, None]
        assert np.all((mix_eval > 0) == expected_positive)
        repetitions = np.unique(mix_eval).size < mix_eval.size
        assert not repetitions

        # Test logp
        # MvNormal logp is currently limited to 2d values
        expectation = pytest.raises(ValueError) if mix_eval.ndim > 2 else does_not_raise()
        with expectation:
            mix_logp_eval = logp(mix, mix_eval).eval()
            assert mix_logp_eval.shape == expected_shape[:-1]
            bcast_weights = np.broadcast_to(weights, (*expected_shape[:-1], 2))
            expected_logp = np.stack(
                (
                    logp(components[0], mix_eval).eval(),
                    logp(components[1], mix_eval).eval(),
                ),
                axis=-1,
            )[bcast_weights == 1]
            expected_logp = expected_logp.reshape(expected_shape[:-1])
            assert np.allclose(mix_logp_eval, expected_logp)
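does_not_raise is not defined in this snippet; it is the standard pytest idiom for parametrizing "raises vs. does not raise" expectations, usually just an alias of contextlib.nullcontext:

    from contextlib import nullcontext as does_not_raise

With that alias, the with-block above runs normally for values of 2 or fewer dimensions and must raise ValueError otherwise.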
Example No. 14
    def test_nested_mixture(self):
        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7
        nbr = 4
        with Model() as model:
            # mixture component distributions
            g_comp = Normal.dist(
                mu=Exponential("mu_g", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
            )
            l_comp = LogNormal.dist(
                mu=Exponential("mu_l", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
            )
            # weight vector for the mixtures
            g_w = Dirichlet("g_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
            l_w = Dirichlet("l_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
            # inner mixtures
            g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
            l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
            # mixture of mixtures
            mix_w = Dirichlet("mix_w", a=floatX(np.ones(2)), transform=None, shape=(2,))
            mix = Mixture("mix", w=mix_w, comp_dists=[g_mix, l_mix], observed=np.exp(self.norm_x))

        test_point = model.compute_initial_point()

        def mixmixlogp(value, point):
            floatX = aesara.config.floatX
            priorlogp = (
                st.dirichlet.logpdf(
                    x=point["g_w"],
                    alpha=np.ones(nbr) * 0.0000001,
                ).astype(floatX)
                + st.expon.logpdf(x=point["mu_g"]).sum(dtype=floatX)
                + st.dirichlet.logpdf(
                    x=point["l_w"],
                    alpha=np.ones(nbr) * 0.0000001,
                ).astype(floatX)
                + st.expon.logpdf(x=point["mu_l"]).sum(dtype=floatX)
                + st.dirichlet.logpdf(
                    x=point["mix_w"],
                    alpha=np.ones(2),
                ).astype(floatX)
            )
            complogp1 = st.norm.logpdf(x=value, loc=point["mu_g"]).astype(floatX)
            mixlogp1 = logsumexp(
                np.log(point["g_w"]).astype(floatX) + complogp1, axis=-1, keepdims=True
            )
            complogp2 = st.lognorm.logpdf(value, 1.0, 0.0, np.exp(point["mu_l"])).astype(floatX)
            mixlogp2 = logsumexp(
                np.log(point["l_w"]).astype(floatX) + complogp2, axis=-1, keepdims=True
            )
            complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
            mixmixlogpg = logsumexp(
                np.log(point["mix_w"]).astype(floatX) + complogp_mix, axis=-1, keepdims=False
            )
            return priorlogp, mixmixlogpg

        value = np.exp(self.norm_x)[:, None]
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)

        # check logp of mixture
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)

        # check model logp
        assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)

        # check input and check logp again
        test_point["g_w"] = np.asarray([0.1, 0.1, 0.2, 0.6])
        test_point["mu_g"] = np.exp(np.random.randn(nbr))
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
        assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)
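The reference implementation applies the mixture log-density identity log p(x) = logsumexp_k(log w_k + log p_k(x)) twice: once inside each mixture (mixlogp1, mixlogp2) and once across the two mixtures (mixmixlogpg). A toy check of the identity itself with scipy's logsumexp:

    import numpy as np
    from scipy.special import logsumexp

    log_w = np.log([0.3, 0.7])          # mixture weights
    log_p = np.array([-1.2, -0.4])      # component log-densities at some x
    log_mix = logsumexp(log_w + log_p)  # log p(x) under the mixture
    assert np.isclose(np.exp(log_mix),
                      0.3 * np.exp(-1.2) + 0.7 * np.exp(-0.4))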
Example No. 15
    def test_list_multivariate_components(self, weights, comp_dists, size, expected):
        with Model() as model:
            Mixture("x", weights, comp_dists, size=size)
        assert_moment_is_expected(model, expected, check_finite_logp=False)