Example #1
    def test_vector_components(self):
        nd = 3
        npop = 4
        # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
        mus = at.constant(np.full((nd, npop), np.arange(npop)))

        with Model(rng_seeder=self.get_random_state()) as model:
            m = Mixture(
                "m",
                w=np.ones(npop) / npop,
                # An MvNormal with diagonal covariance sigma**2 * I should be
                # equivalent to the vector of independent Normals in latent_m
                comp_dists=[MvNormal.dist(mus[:, i], np.eye(nd) * 1e-5**2) for i in range(npop)],
            )
            z = Categorical("z", p=np.ones(npop) / npop)
            latent_m = Normal("latent_m", mu=mus[..., z], sigma=1e-5, shape=nd)

        size = 100
        m_val = draw(m, draws=size)
        latent_m_val = draw(latent_m, draws=size)
        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 comes from the same mixture
        # component
        assert np.all(np.diff(m_val) < 1e-3)
        assert np.all(np.diff(latent_m_val) < 1e-3)
        # TODO: The following statistical test appears to be flakier than expected,
        #  even though the distributions should be the same. Seeding should make it
        #  stable, but this might be worth investigating further.
        self.samples_from_same_distribution(m_val, latent_m_val)

        # Check that mixing of values in the last axis leads to smaller logp
        logp_fn = model.compile_logp(vars=[m])
        assert logp_fn({"m": [0, 0, 0]}) > logp_fn({"m": [0, 1, 0]}) > logp_fn({"m": [0, 1, 2]})
        self.logp_matches(m, latent_m, z, npop, model=model)
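Note: these snippets assume the PyMC v4 API with the Aesara backend. A minimal preamble that would make them importable might look like the following; the exact import layout of the original test module is an assumption.

# Hypothetical preamble for these snippets (assumed, not part of the original tests).
import numpy as np
import aesara
import aesara.tensor as at
from numpy.testing import assert_allclose
from scipy.special import logsumexp

from pymc import (
    AR,
    Categorical,
    Mixture,
    Model,
    Multinomial,
    MvNormal,
    Normal,
    NormalMixture,
    draw,
    logp,
    sample_prior_predictive,
)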
Example #2
    def test_scalar_components(self):
        nd = 3
        npop = 4
        # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
        mus = at.constant(np.full((nd, npop), np.arange(npop)))

        with Model(rng_seeder=self.get_random_state()) as model:
            m = NormalMixture(
                "m",
                w=np.ones(npop) / npop,
                mu=mus,
                sigma=1e-5,
                comp_shape=(nd, npop),
                shape=nd,
            )
            z = Categorical("z", p=np.ones(npop) / npop, shape=nd)
            mu = at.as_tensor_variable([mus[i, z[i]] for i in range(nd)])
            latent_m = Normal("latent_m", mu=mu, sigma=1e-5, shape=nd)

        size = 100
        m_val = draw(m, draws=size)
        latent_m_val = draw(latent_m, draws=size)

        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 can come from independent
        # components
        assert not all(np.all(np.diff(m_val) < 1e-3, axis=-1))
        assert not all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))
        self.samples_from_same_distribution(m_val, latent_m_val)

        # Check that logp is the same whether elements of the last axis are mixed or not
        logp_fn = model.compile_logp(vars=[m])
        assert np.isclose(logp_fn({"m": [0, 0, 0]}), logp_fn({"m": [0, 1, 2]}))
        self.logp_matches(m, latent_m, z, npop, model=model)
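Both tests above rely on a samples_from_same_distribution helper that is not shown. One plausible implementation, sketched here as a standalone function under the assumption that it runs a per-dimension two-sample Kolmogorov-Smirnov test, could be:

# Hypothetical sketch of the missing helper; the real test class may use a
# different statistical test or significance level.
from scipy import stats

def samples_from_same_distribution(a, b, alpha=0.01):
    """Assert that each marginal of `a` and `b` passes a two-sample KS test."""
    assert a.shape == b.shape
    for dim in range(a.shape[-1]):
        _, p_value = stats.ks_2samp(a[..., dim], b[..., dim])
        assert p_value > alpha, f"marginal {dim} differs (p={p_value:.4g})"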
Example #3
    def test_with_mvnormal(self):
        # 10 batch, 3-variate Gaussian
        mu = np.random.randn(self.mixture_comps, 3)
        mat = np.random.randn(3, 3)
        cov = mat @ mat.T
        chol = np.linalg.cholesky(cov)
        w = np.ones(self.mixture_comps) / self.mixture_comps

        with Model() as model:
            comp_dists = MvNormal.dist(mu=mu, chol=chol, shape=(self.mixture_comps, 3))
            mixture = Mixture("mixture", w=w, comp_dists=comp_dists, shape=(3,))
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mixture"].shape == (self.n_samples, 3)
        assert draw(mixture, draws=self.size).shape == (self.size, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        initial_point = model.compute_initial_point()
        comp_logp = logp(comp_dists, initial_point["mixture"].reshape(1, 3))
        log_sum_exp = logsumexp(comp_logp.eval() + np.log(w), axis=0, keepdims=True).sum()
        assert_allclose(
            model.compile_logp()(initial_point),
            log_sum_exp,
            rtol,
        )
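The assertion above checks the mixture identity logp_mix(x) = logsumexp_k(log w_k + logp_k(x)). A purely illustrative numeric check of the same identity, using made-up scipy components and values rather than the model under test:

# Illustrative only; component parameters and the value x are made up.
import numpy as np
from scipy import stats
from scipy.special import logsumexp

x = 0.3
w = np.array([0.4, 0.6])
components = [stats.norm(-1.0, 1.0), stats.norm(2.0, 0.5)]
comp_logp = np.array([comp.logpdf(x) for comp in components])
mix_logp = logsumexp(np.log(w) + comp_logp)
mix_pdf = sum(w_k * comp.pdf(x) for w_k, comp in zip(w, components))
assert np.isclose(mix_logp, np.log(mix_pdf))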
Example #4
    def test_with_multinomial(self, batch_shape):
        p = np.random.uniform(size=(*batch_shape, self.mixture_comps, 3))
        n = 100 * np.ones((*batch_shape, 1))
        w = np.ones(self.mixture_comps) / self.mixture_comps
        mixture_axis = len(batch_shape)
        with Model() as model:
            comp_dists = Multinomial.dist(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))
            mixture = Mixture(
                "mixture",
                w=w,
                comp_dists=comp_dists,
                shape=(*batch_shape, 3),
            )
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mixture"].shape == (self.n_samples, *batch_shape, 3)
        assert draw(mixture, draws=self.size).shape == (self.size, *batch_shape, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        initial_point = model.compute_initial_point()
        comp_logp = logp(comp_dists, initial_point["mixture"].reshape(*batch_shape, 1, 3))
        log_sum_exp = logsumexp(
            comp_logp.eval() + np.log(w), axis=mixture_axis, keepdims=True
        ).sum()
        assert_allclose(
            model.compile_logp()(initial_point),
            log_sum_exp,
            rtol,
        )
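Examples #3 and #4 reference class attributes (mixture_comps, n_samples, size) and a batch_shape parameter that are defined outside the excerpts. A hypothetical scaffold, with made-up values, shapes, and class name, might look like:

# Hypothetical scaffolding; the attribute names appear in the original suite,
# but the concrete values and parametrized shapes shown here are assumptions.
import pytest

class TestMixtureWithComponents:  # placeholder class name
    mixture_comps = 10
    n_samples = 30
    size = 1000

    @pytest.mark.parametrize("batch_shape", [(2,), (2, 3)], ids=str)
    def test_with_multinomial(self, batch_shape):
        ...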
Example #5
    def test_batched_size(self, constant):
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = np.random.randn(batch_size, ar_order + int(constant))
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            y = AR("y",
                   beta_tp,
                   shape=(batch_size, steps),
                   initval=y_tp,
                   constant=constant)
        with Model() as t1:
            for i in range(batch_size):
                AR(f"y_{i}",
                   beta_tp[i],
                   sigma=1.0,
                   shape=steps,
                   initval=y_tp[i],
                   constant=constant)

        assert y.owner.op.ar_order == ar_order

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        y_eval = draw(y, draws=2)
        assert y_eval[0].shape == (batch_size, steps)
        assert not np.any(np.isclose(y_eval[0], y_eval[1]))
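The constant argument indicates a parametrized test: when True, beta_tp gains one extra column for the AR constant (intercept) term, as ar_order + int(constant) shows. A plausible decorator, assumed rather than taken from the snippet:

# Hypothetical parametrization over the AR constant term.
import pytest

@pytest.mark.parametrize("constant", (False, True))
def test_batched_size(self, constant):
    ...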
Example #6
    def test_component_choice_random(self):
        """Test that mixture choices change over evaluations"""
        with Model() as m:
            weights = [0.5, 0.5]
            components = [Normal.dist(-10, 0.01), Normal.dist(10, 0.01)]
            mix = Mixture.dist(weights, components)
        draws = draw(mix, draws=10)
        # Probability that all 10 draws come from the same component is 2 * 0.5**10,
        # so both components should be represented among the draws
        assert np.unique(draws > 0).size == 2
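The assertion fails only when all ten draws land in the same component, which happens with probability 2 * 0.5**10 (about 0.2%). If that residual flakiness is a concern, the draw can be seeded; this sketch assumes draw accepts a random_seed argument, as in PyMC v4:

# Sketch: a seeded variant of the same check (the random_seed value is arbitrary).
draws = draw(mix, draws=10, random_seed=421)
assert np.unique(draws > 0).size == 2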