Example #1
    def test_with_multinomial(self, batch_shape):
        p = np.random.uniform(size=(*batch_shape, self.mixture_comps, 3))
        n = 100 * np.ones((*batch_shape, 1))
        w = np.ones(self.mixture_comps) / self.mixture_comps
        mixture_axis = len(batch_shape)
        with Model() as model:
            comp_dists = Multinomial.dist(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))
            mixture = Mixture(
                "mixture",
                w=w,
                comp_dists=comp_dists,
                shape=(*batch_shape, 3),
            )
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mixture"].shape == (self.n_samples, *batch_shape, 3)
        assert draw(mixture, draws=self.size).shape == (self.size, *batch_shape, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        initial_point = model.compute_initial_point()
        comp_logp = logp(comp_dists, initial_point["mixture"].reshape(*batch_shape, 1, 3))
        log_sum_exp = logsumexp(
            comp_logp.eval() + np.log(w), axis=mixture_axis, keepdims=True
        ).sum()
        assert_allclose(
            model.compile_logp()(initial_point),
            log_sum_exp,
            rtol,
        )
Example #2
    def test_with_mvnormal(self):
        # `mixture_comps` components of a 3-variate Gaussian (no batch dimensions)
        mu = np.random.randn(self.mixture_comps, 3)
        mat = np.random.randn(3, 3)
        cov = mat @ mat.T
        chol = np.linalg.cholesky(cov)
        w = np.ones(self.mixture_comps) / self.mixture_comps

        with Model() as model:
            comp_dists = MvNormal.dist(mu=mu, chol=chol, shape=(self.mixture_comps, 3))
            mixture = Mixture("mixture", w=w, comp_dists=comp_dists, shape=(3,))
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mixture"].shape == (self.n_samples, 3)
        assert draw(mixture, draws=self.size).shape == (self.size, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        initial_point = model.compute_initial_point()
        comp_logp = logp(comp_dists, initial_point["mixture"].reshape(1, 3))
        log_sum_exp = logsumexp(comp_logp.eval() + np.log(w), axis=0, keepdims=True).sum()
        assert_allclose(
            model.compile_logp()(initial_point),
            log_sum_exp,
            rtol,
        )
Example #3
    def test_broadcasting_in_shape(self):
        with Model() as model:
            mu = Gamma("mu", 1.0, 1.0, shape=2)
            comp_dists = Poisson.dist(mu, shape=2)
            mix = Mixture("mix", w=np.ones(2) / 2, comp_dists=comp_dists, shape=(1000,))
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mix"].shape == (self.n_samples, 1000)
Example #4
    def initialize_population(self) -> Dict[str, NDArray]:
        """Create an initial population from the prior distribution"""

        return sample_prior_predictive(
            self.draws,
            var_names=[v.name for v in self.model.unobserved_value_vars],
            model=self.model,
            return_inferencedata=False,
        )
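
A self-contained sketch of the same pattern outside the SMC class (the model below is illustrative, assuming PyMC v4 to match the API used above; with `return_inferencedata=False`, `sample_prior_predictive` returns a plain dict keyed by variable name):

import numpy as np
import pymc as pm

with pm.Model() as m:
    mu = pm.Normal("mu", 0.0, 1.0)
    pm.Normal("y", mu=mu, sigma=1.0, observed=np.zeros(5))

    draws = pm.sample_prior_predictive(
        100,
        var_names=[v.name for v in m.unobserved_value_vars],
        return_inferencedata=False,
    )

assert draws["mu"].shape == (100,)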
Example #5
def test_interval_missing_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)

        rng = aesara.shared(np.random.RandomState(2323), borrow=True)

        with pytest.warns(ImputationWarning):
            theta1 = Uniform("theta1", 0, 5, observed=obs1, rng=rng)
        with pytest.warns(ImputationWarning):
            theta2 = Normal("theta2", mu=theta1, observed=obs2, rng=rng)

        assert "theta1_observed" in model.named_vars
        assert "theta1_missing_interval__" in model.named_vars
        assert not hasattr(
            model.rvs_to_values[model.named_vars["theta1_observed"]].tag, "transform"
        )

        prior_trace = sample_prior_predictive(return_inferencedata=False)

        # Make sure the observed + missing combined deterministics have the
        # same shape as the original observation vectors
        assert prior_trace["theta1"].shape[-1] == obs1.shape[0]
        assert prior_trace["theta2"].shape[-1] == obs2.shape[0]

        # Make sure that the observed values are newly generated samples
        assert np.all(np.var(prior_trace["theta1_observed"], 0) > 0.0)
        assert np.all(np.var(prior_trace["theta2_observed"], 0) > 0.0)

        # Make sure the missing parts of the combined deterministic match the
        # sampled missing and observed variable values
        assert np.mean(prior_trace["theta1"][:, obs1.mask] - prior_trace["theta1_missing"]) == 0.0
        assert np.mean(prior_trace["theta1"][:, ~obs1.mask] - prior_trace["theta1_observed"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, obs2.mask] - prior_trace["theta2_missing"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, ~obs2.mask] - prior_trace["theta2_observed"]) == 0.0

        assert {"theta1", "theta2"} <= set(prior_trace.keys())

        trace = sample(
            chains=1, draws=50, compute_convergence_checks=False, return_inferencedata=False
        )

        assert np.all(0 < trace["theta1_missing"].mean(0))
        assert np.all(0 < trace["theta2_missing"].mean(0))
        assert "theta1" not in trace.varnames
        assert "theta2" not in trace.varnames

        # Make sure that the observed values are newly generated samples and that
        # the observed values and the deterministic match
        pp_trace = sample_posterior_predictive(trace, return_inferencedata=False, keep_size=False)
        assert np.all(np.var(pp_trace["theta1"], 0) > 0.0)
        assert np.all(np.var(pp_trace["theta2"], 0) > 0.0)
        assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0
        assert np.mean(pp_trace["theta2"][:, ~obs2.mask] - pp_trace["theta2_observed"]) == 0.0
Example #6
def test_missing(data):
    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            _ = Normal("y", x, 1, observed=data)

    assert "y_missing" in model.named_vars

    test_point = model.compute_initial_point()
    assert not np.isnan(model.compile_logp()(test_point))

    with model:
        prior_trace = sample_prior_predictive(return_inferencedata=False)
    assert {"x", "y"} <= set(prior_trace.keys())
Example #7
File: smc.py  Project: sthagen/pymc3
    def initialize_population(self) -> Dict[str, np.ndarray]:
        """Create an initial population from the prior distribution"""
        sys.stdout.write(" ")  # see issue #5828
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    category=UserWarning,
                                    message="The effect of Potentials")

            result = sample_prior_predictive(
                self.draws,
                var_names=[v.name for v in self.model.unobserved_value_vars],
                model=self.model,
                return_inferencedata=False,
            )
        return cast(Dict[str, np.ndarray], result)
Example #8
def test_missing_with_predictors():
    predictors = np.array([0.5, 1, 0.5, 2, 0.3])
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            y = Normal("y", x * predictors, 1, observed=data)

    assert "y_missing" in model.named_vars

    test_point = model.compute_initial_point()
    assert not np.isnan(model.compile_logp()(test_point))

    with model:
        prior_trace = sample_prior_predictive(return_inferencedata=False)
    assert {"x", "y"} <= set(prior_trace.keys())
Example #9
def test_missing_dual_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)
        beta1 = Normal("beta1", 1, 1)
        beta2 = Normal("beta2", 2, 1)
        latent = Normal("theta", size=5)
        with pytest.warns(ImputationWarning):
            ovar1 = Normal("o1", mu=beta1 * latent, observed=obs1)
        with pytest.warns(ImputationWarning):
            ovar2 = Normal("o2", mu=beta2 * latent, observed=obs2)

        prior_trace = sample_prior_predictive(return_inferencedata=False)
        assert {"beta1", "beta2", "theta", "o1", "o2"} <= set(prior_trace.keys())
        # TODO: Assert something
        trace = sample(chains=1, draws=50)
Example #10
    def test_list_mvnormals_predictive_sampling_shape(self):
        N = 100  # number of data points
        K = 3  # number of mixture components
        D = 3  # dimensionality of the data
        X = MvNormal.dist(np.zeros(D), np.eye(D), size=N).eval()

        with Model() as model:
            pi = Dirichlet("pi", np.ones(K), shape=(K,))

            comp_dist = []
            mu = []
            packed_chol = []
            chol = []
            for i in range(K):
                mu.append(Normal(f"mu{i}", 0, 10, shape=D))
                packed_chol.append(
                    LKJCholeskyCov(
                        f"chol_cov_{i}",
                        eta=2,
                        n=D,
                        sd_dist=HalfNormal.dist(2.5, size=D),
                        compute_corr=False,
                    )
                )
                chol.append(expand_packed_triangular(D, packed_chol[i], lower=True))
                comp_dist.append(MvNormal.dist(mu=mu[i], chol=chol[i], shape=D))

            Mixture("x_obs", pi, comp_dist, observed=X)

        n_samples = 20
        with model:
            prior = sample_prior_predictive(samples=n_samples, return_inferencedata=False)
            ppc = sample_posterior_predictive(
                [self.get_inital_point(model)], samples=n_samples, return_inferencedata=False
            )
        assert ppc["x_obs"].shape == (n_samples,) + X.shape
        assert prior["x_obs"].shape == (n_samples,) + X.shape
        assert prior["mu0"].shape == (n_samples, D)
        assert prior["chol_cov_0"].shape == (n_samples, D * (D + 1) // 2)
Example #11
    def test_single_poisson_predictive_sampling_shape(self):
        # test the shape broadcasting in mixture random
        rng = self.get_random_state()
        y = np.concatenate([rng.poisson(5, size=10), rng.poisson(9, size=10)])
        with Model() as model:
            comp0 = Poisson.dist(mu=np.ones(2))
            w0 = Dirichlet("w0", a=np.ones(2), shape=(2,))
            like0 = Mixture("like0", w=w0, comp_dists=comp0, observed=y)

            comp1 = Poisson.dist(mu=np.ones((20, 2)), shape=(20, 2))
            w1 = Dirichlet("w1", a=np.ones(2), shape=(2,))
            like1 = Mixture("like1", w=w1, comp_dists=comp1, observed=y)

            comp2 = Poisson.dist(mu=np.ones(2))
            w2 = Dirichlet("w2", a=np.ones(2), shape=(20, 2))
            like2 = Mixture("like2", w=w2, comp_dists=comp2, observed=y)

            comp3 = Poisson.dist(mu=np.ones(2), shape=(20, 2))
            w3 = Dirichlet("w3", a=np.ones(2), shape=(20, 2))
            like3 = Mixture("like3", w=w3, comp_dists=comp3, observed=y)

        n_samples = 30
        with model:
            prior = sample_prior_predictive(samples=n_samples, return_inferencedata=False)
            ppc = sample_posterior_predictive(
                [self.get_inital_point(model)], samples=n_samples, return_inferencedata=False
            )

        assert prior["like0"].shape == (n_samples, 20)
        assert prior["like1"].shape == (n_samples, 20)
        assert prior["like2"].shape == (n_samples, 20)
        assert prior["like3"].shape == (n_samples, 20)

        assert ppc["like0"].shape == (n_samples, 20)
        assert ppc["like1"].shape == (n_samples, 20)
        assert ppc["like2"].shape == (n_samples, 20)
        assert ppc["like3"].shape == (n_samples, 20)