Example #1
    def test_sum_normal(self):
        with pm.Model() as model:
            a = pm.Normal("a", sigma=0.2)
            b = pm.Normal("b", mu=a)
            trace = pm.sample()

        with model:
            # test list input
            ppc0 = pm.sample_posterior_predictive([model.test_point], samples=10)
            assert ppc0 == {}
            ppc = pm.sample_posterior_predictive(trace, samples=1000, var_names=["b"])
            assert len(ppc) == 1
            assert ppc["b"].shape == (1000,)
            scale = np.sqrt(1 + 0.2 ** 2)
            _, pval = stats.kstest(ppc["b"], stats.norm(scale=scale).cdf)
            assert pval > 0.001

            # test list input
            ppc0 = pm.fast_sample_posterior_predictive([model.test_point], samples=10)
            assert ppc0 == {}
            ppc = pm.fast_sample_posterior_predictive(trace, samples=1000, var_names=["b"])
            assert len(ppc) == 1
            assert ppc["b"].shape == (1000,)
            scale = np.sqrt(1 + 0.2 ** 2)
            _, pval = stats.kstest(ppc["b"], stats.norm(scale=scale).cdf)
            assert pval > 0.001
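A quick check of the reference scale used in the kstest above: with a ~ Normal(0, 0.2) and b | a ~ Normal(a, 1), the marginal standard deviation of b is sqrt(1 + 0.2**2) ≈ 1.0198. Below is a minimal NumPy-only sketch (not part of the test suite) that confirms this value by simulation:

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(0.0, 0.2, size=1_000_000)  # draws from the prior on a
b = rng.normal(a, 1.0)                    # b | a ~ Normal(a, 1)
print(b.std(), np.sqrt(1 + 0.2 ** 2))     # both approximately 1.0198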
Example #2
    def test_exceptions(self, caplog):
        with pm.Model() as model:
            mu = pm.Normal("mu", 0.0, 1.0)
            a = pm.Normal("a", mu=mu, sigma=1, observed=np.array([0.5, 0.2]))
            trace = pm.sample()

        with model:
            with pytest.raises(IncorrectArgumentsError):
                ppc = pm.sample_posterior_predictive(trace,
                                                     samples=10,
                                                     keep_size=True)
            with pytest.raises(IncorrectArgumentsError):
                ppc = pm.fast_sample_posterior_predictive(trace,
                                                          samples=10,
                                                          keep_size=True)

            # Not for fast_sample_posterior_predictive
            with pytest.raises(IncorrectArgumentsError):
                ppc = pm.sample_posterior_predictive(trace,
                                                     size=4,
                                                     keep_size=True)
            with pytest.raises(IncorrectArgumentsError):
                ppc = pm.sample_posterior_predictive(trace,
                                                     vars=[a],
                                                     var_names=["a"])
            # test wrong type argument
            bad_trace = {"mu": stats.norm.rvs(size=1000)}
            with pytest.raises(TypeError):
                ppc = pm.sample_posterior_predictive(bad_trace)
            with pytest.raises(TypeError):
                ppc = pm.fast_sample_posterior_predictive(bad_trace)
Example #3
    def test_vector_observed(self):
        with pm.Model() as model:
            mu = pm.Normal("mu", mu=0, sigma=1)
            a = pm.Normal("a", mu=mu, sigma=1, observed=np.array([0.0, 1.0]))
            trace = pm.sample()

        with model:
            # test list input
            ppc0 = pm.sample_posterior_predictive([model.test_point], samples=10)
            ppc = pm.sample_posterior_predictive(trace, samples=12, var_names=[])
            assert len(ppc) == 0
            ppc = pm.sample_posterior_predictive(trace, samples=12, var_names=["a"])
            assert "a" in ppc
            assert ppc["a"].shape == (12, 2)

            ppc = pm.sample_posterior_predictive(trace, samples=10, var_names=["a"], size=4)
            assert "a" in ppc
            assert ppc["a"].shape == (10, 4, 2)

            # now with fast version
            # test list input
            ppc0 = pm.fast_sample_posterior_predictive([model.test_point], samples=10)
            ppc = pm.fast_sample_posterior_predictive(trace, samples=12, var_names=[])
            assert len(ppc) == 0
            ppc = pm.fast_sample_posterior_predictive(trace, samples=12, var_names=["a"])
            assert "a" in ppc
            assert ppc["a"].shape == (12, 2)
Example #4
    def test_sample(self):
        x = np.random.normal(size=100)
        y = x + np.random.normal(scale=1e-2, size=100)

        x_pred = np.linspace(-3, 3, 200)

        x_shared = theano.shared(x)

        with pm.Model() as model:
            b = pm.Normal("b", 0.0, 10.0)
            pm.Normal("obs", b * x_shared, np.sqrt(1e-2), observed=y)
            prior_trace0 = pm.sample_prior_predictive(1000)

            trace = pm.sample(1000, init=None, tune=1000, chains=1)
            pp_trace0 = pm.sample_posterior_predictive(trace, 1000)
            pp_trace01 = pm.fast_sample_posterior_predictive(trace, 1000)

            x_shared.set_value(x_pred)
            prior_trace1 = pm.sample_prior_predictive(1000)
            pp_trace1 = pm.sample_posterior_predictive(trace, 1000)
            pp_trace11 = pm.fast_sample_posterior_predictive(trace, 1000)

        assert prior_trace0["b"].shape == (1000,)
        assert prior_trace0["obs"].shape == (1000, 100)
        np.testing.assert_allclose(x, pp_trace0["obs"].mean(axis=0), atol=1e-1)
        np.testing.assert_allclose(x, pp_trace01["obs"].mean(axis=0), atol=1e-1)

        assert prior_trace1["b"].shape == (1000,)
        assert prior_trace1["obs"].shape == (1000, 200)
        np.testing.assert_allclose(x_pred, pp_trace1["obs"].mean(axis=0), atol=1e-1)
        np.testing.assert_allclose(x_pred, pp_trace11["obs"].mean(axis=0), atol=1e-1)
Example #5
    def test_normal_scalar(self):
        nchains = 2
        ndraws = 500
        with pm.Model() as model:
            mu = pm.Normal("mu", 0.0, 1.0)
            a = pm.Normal("a", mu=mu, sigma=1, observed=0.0)
            trace = pm.sample(draws=ndraws, chains=nchains)

        with model:
            # test list input
            ppc0 = pm.sample_posterior_predictive([model.test_point],
                                                  samples=10)
            ppc0 = pm.fast_sample_posterior_predictive([model.test_point],
                                                       samples=10)
            # deprecated argument is not introduced to fast version [2019/08/20:rpg]
            with pytest.warns(DeprecationWarning):
                ppc = pm.sample_posterior_predictive(trace, vars=[a])
            # test empty ppc
            ppc = pm.sample_posterior_predictive(trace, var_names=[])
            assert len(ppc) == 0
            ppc = pm.fast_sample_posterior_predictive(trace, var_names=[])
            assert len(ppc) == 0

            # test keep_size parameter
            ppc = pm.sample_posterior_predictive(trace, keep_size=True)
            assert ppc["a"].shape == (nchains, ndraws)
            ppc = pm.fast_sample_posterior_predictive(trace, keep_size=True)
            assert ppc["a"].shape == (nchains, ndraws)

            # test keep_size parameter and idata input
            idata = az.from_pymc3(trace)
            ppc = pm.sample_posterior_predictive(idata, keep_size=True)
            assert ppc["a"].shape == (nchains, ndraws)
            ppc = pm.fast_sample_posterior_predictive(trace, keep_size=True)
            assert ppc["a"].shape == (nchains, ndraws)

            # test default case
            ppc = pm.sample_posterior_predictive(trace, var_names=["a"])
            assert "a" in ppc
            assert ppc["a"].shape == (nchains * ndraws, )
            # mu's standard deviation may have changed thanks to a's observed
            _, pval = stats.kstest(ppc["a"] - trace["mu"],
                                   stats.norm(loc=0, scale=1).cdf)
            assert pval > 0.001

            # test default case
            ppc = pm.fast_sample_posterior_predictive(trace, var_names=["a"])
            assert "a" in ppc
            assert ppc["a"].shape == (nchains * ndraws, )
            # mu's standard deviation may have changed thanks to a's observed
            _, pval = stats.kstest(ppc["a"] - trace["mu"],
                                   stats.norm(loc=0, scale=1).cdf)
            assert pval > 0.001

        # size argument not introduced to fast version [2019/08/20:rpg]
        with model:
            ppc = pm.sample_posterior_predictive(trace,
                                                 size=5,
                                                 var_names=["a"])
            assert ppc["a"].shape == (nchains * ndraws, 5)
Example #6
    def test_density_dist_without_random_not_sampleable(self):
        with pm.Model() as model:
            mu = pm.Normal('mu', 0, 1)
            normal_dist = pm.Normal.dist(mu, 1)
            pm.DensityDist('density_dist', normal_dist.logp, observed=np.random.randn(100))
            trace = pm.sample(100)

        samples = 500
        with pytest.raises(ValueError):
            pm.sample_posterior_predictive(trace, samples=samples, model=model, size=100)

        with pytest.raises((TypeError, ValueError)):
            pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=100)
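For contrast with the exceptions tested above, Example #25 further down shows that a DensityDist becomes sampleable once a random callable is supplied. A condensed sketch of that pattern (mirroring Example #25 rather than making an independent API claim):

with pm.Model() as sampleable_model:
    mu = pm.Normal('mu', 0, 1)
    normal_dist = pm.Normal.dist(mu, 1)
    rvs = pm.Normal.dist(mu, 1, shape=100).random  # handcrafted random() matching the observed shape
    pm.DensityDist(
        'density_dist',
        normal_dist.logp,
        observed=np.random.randn(100),
        random=rvs,
        wrap_random_with_dist_shape=False,
    )
    trace = pm.sample(100)
    ppc = pm.fast_sample_posterior_predictive(trace, samples=500, size=100)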
Example #7
    def test_potentials_warning(self):
        warning_msg = "The effect of Potentials on other parameters is ignored during"
        with pm.Model() as m:
            a = pm.Normal("a", 0, 1)
            p = pm.Potential("p", a + 1)
            obs = pm.Normal("obs", a, 1, observed=5)

        trace = az.from_dict({"a": np.random.rand(10)})
        with m:
            with pytest.warns(UserWarning, match=warning_msg):
                pm.sample_posterior_predictive(trace, samples=5)

            with pytest.warns(UserWarning, match=warning_msg):
                pm.fast_sample_posterior_predictive(trace, samples=5)
Example #8
    def test_normal_vector(self, caplog):
        with pm.Model() as model:
            mu = pm.Normal("mu", 0.0, 1.0)
            a = pm.Normal("a", mu=mu, sigma=1, observed=np.array([0.5, 0.2]))
            trace = pm.sample()

        with model:
            # test list input
            ppc0 = pm.sample_posterior_predictive([model.test_point],
                                                  samples=10)
            ppc = pm.sample_posterior_predictive(trace,
                                                 samples=12,
                                                 var_names=[])
            assert len(ppc) == 0

            # test list input
            ppc0 = pm.fast_sample_posterior_predictive([model.test_point],
                                                       samples=10)
            ppc = pm.fast_sample_posterior_predictive(trace,
                                                      samples=12,
                                                      var_names=[])
            assert len(ppc) == 0

            # test keep_size parameter
            ppc = pm.sample_posterior_predictive(trace, keep_size=True)
            assert ppc["a"].shape == (trace.nchains, len(trace), 2)
            with pytest.warns(UserWarning):
                ppc = pm.sample_posterior_predictive(trace,
                                                     samples=12,
                                                     var_names=["a"])
            assert "a" in ppc
            assert ppc["a"].shape == (12, 2)

            # test keep_size parameter
            ppc = pm.fast_sample_posterior_predictive(trace, keep_size=True)
            assert ppc["a"].shape == (trace.nchains, len(trace), 2)
            with pytest.warns(UserWarning):
                ppc = pm.fast_sample_posterior_predictive(trace,
                                                          samples=12,
                                                          var_names=["a"])
            assert "a" in ppc
            assert ppc["a"].shape == (12, 2)

            # size argument not supported by the fast_ version. [2019/08/19:rpg]
            ppc = pm.sample_posterior_predictive(trace,
                                                 samples=10,
                                                 var_names=["a"],
                                                 size=4)
            assert "a" in ppc
            assert ppc["a"].shape == (10, 4, 2)
Example #9
    def test_sample_from_xarray_posterior_fast(self, point_list_arg_bug_fixture):
        pmodel, trace = point_list_arg_bug_fixture
        idat = az.from_pymc3(trace)
        with pmodel:
            pp = pm.fast_sample_posterior_predictive(idat.posterior, var_names=["d"])
Example #10
    def test_deterministic_of_observed(self):
        meas_in_1 = pm.theanof.floatX(2 + 4 * np.random.randn(100))
        meas_in_2 = pm.theanof.floatX(5 + 4 * np.random.randn(100))
        with pm.Model() as model:
            mu_in_1 = pm.Normal("mu_in_1", 0, 1)
            sigma_in_1 = pm.HalfNormal("sd_in_1", 1)
            mu_in_2 = pm.Normal("mu_in_2", 0, 1)
            sigma_in_2 = pm.HalfNormal("sd__in_2", 1)

            in_1 = pm.Normal("in_1", mu_in_1, sigma_in_1, observed=meas_in_1)
            in_2 = pm.Normal("in_2", mu_in_2, sigma_in_2, observed=meas_in_2)
            out_diff = in_1 + in_2
            pm.Deterministic("out", out_diff)

            trace = pm.sample(100)
            ppc_trace = pm.trace_to_dataframe(
                trace, varnames=[n for n in trace.varnames
                                 if n != "out"]).to_dict("records")
            with pytest.warns(DeprecationWarning):
                ppc = pm.sample_posterior_predictive(
                    model=model,
                    trace=ppc_trace,
                    samples=len(ppc_trace),
                    vars=(model.deterministics + model.basic_RVs),
                )

            rtol = 1e-5 if theano.config.floatX == "float64" else 1e-3
            assert np.allclose(ppc["in_1"] + ppc["in_2"],
                               ppc["out"],
                               rtol=rtol)

            ppc = pm.sample_posterior_predictive(
                model=model,
                trace=ppc_trace,
                samples=len(ppc_trace),
                var_names=[
                    var.name
                    for var in (model.deterministics + model.basic_RVs)
                ],
            )

            rtol = 1e-5 if theano.config.floatX == "float64" else 1e-3
            assert np.allclose(ppc["in_1"] + ppc["in_2"],
                               ppc["out"],
                               rtol=rtol)

            ppc = pm.fast_sample_posterior_predictive(
                model=model,
                trace=ppc_trace,
                samples=len(ppc_trace),
                var_names=[
                    var.name
                    for var in (model.deterministics + model.basic_RVs)
                ],
            )

            rtol = 1e-5 if theano.config.floatX == "float64" else 1e-3
            assert np.allclose(ppc["in_1"] + ppc["in_2"],
                               ppc["out"],
                               rtol=rtol)
Example #11
    def sample_all(self,
                   *,
                   model: pm.Model = None,
                   var_names: List[str],
                   **sampler_kwargs) -> arviz.InferenceData:
        """
        Sample the model and return the trace.

        Parameters
        ----------
        model : pm.Model, optional
            A model previously created using `self.build_model()`.
            Builds a new model if None (default).
        var_names : List[str]
            Variable names passed to `pm.fast_sample_posterior_predictive`
        **sampler_kwargs : dict
            Additional arguments to `pm.sample`
        """
        if model is None:
            model = self.build_model()

        with model:
            prior_checks = pm.sample_prior_predictive()
            trace = pm.sample(return_inferencedata=False, **sampler_kwargs)
            post_checks = pm.fast_sample_posterior_predictive(
                trace, var_names=var_names)

        return arviz.from_pymc3(
            trace=trace,
            prior=prior_checks,
            posterior_predictive=post_checks,
            model=model,
        )
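A minimal usage sketch for sample_all, assuming a concrete subclass that implements build_model(); the class name and variable names below are hypothetical:

election_model = ElectionModel()  # hypothetical subclass providing build_model()
idata = election_model.sample_all(
    var_names=["latent_popularity", "N_approve"],  # hypothetical variable names
    draws=1000,
    tune=1000,
)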
Example #12
    def test_shared_data_as_index(self):
        """
        Allow pm.Data to be used for index variables, i.e. with integers as well as floats.
        See https://github.com/pymc-devs/pymc3/issues/3813
        """
        with pm.Model() as model:
            index = pm.Data("index", [2, 0, 1, 0, 2])
            y = pm.Data("y", [1.0, 2.0, 3.0, 2.0, 1.0])
            alpha = pm.Normal("alpha", 0, 1.5, shape=3)
            pm.Normal("obs", alpha[index], np.sqrt(1e-2), observed=y)

            prior_trace = pm.sample_prior_predictive(1000, var_names=["alpha"])
            trace = pm.sample(1000, init=None, tune=1000, chains=1)

        # Predict on new data
        new_index = np.array([0, 1, 2])
        new_y = [5.0, 6.0, 9.0]
        with model:
            pm.set_data(new_data={"index": new_index, "y": new_y})
            pp_trace = pm.sample_posterior_predictive(
                trace, 1000, var_names=["alpha", "obs"])
            pp_tracef = pm.fast_sample_posterior_predictive(
                trace, 1000, var_names=["alpha", "obs"])

        assert prior_trace["alpha"].shape == (1000, 3)
        assert trace["alpha"].shape == (1000, 3)
        assert pp_trace["alpha"].shape == (1000, 3)
        assert pp_trace["obs"].shape == (1000, 3)
        assert pp_tracef["alpha"].shape == (1000, 3)
        assert pp_tracef["obs"].shape == (1000, 3)
Example #13
    def test_point_list_arg_bug_fspp(self, point_list_arg_bug_fixture):
        pmodel, trace = point_list_arg_bug_fixture
        with pmodel:
            pp = pm.fast_sample_posterior_predictive(
                [trace[15]],
                var_names=['d']
            )
Example #14
    def test_model_shared_variable(self):
        x = np.random.randn(100)
        y = x > 0
        x_shared = aesara.shared(x)
        y_shared = aesara.shared(y)
        with pm.Model() as model:
            coeff = pm.Normal("x", mu=0, sd=1)
            logistic = pm.Deterministic("p", pm.math.sigmoid(coeff * x_shared))

            obs = pm.Bernoulli("obs", p=logistic, observed=y_shared)
            trace = pm.sample(100)

        x_shared.set_value([-1, 0, 1.0])
        y_shared.set_value([0, 0, 0])

        samples = 100
        with model:
            post_pred = pm.sample_posterior_predictive(
                trace, samples=samples, var_names=["p", "obs"]
            )

        expected_p = np.array([logistic.eval({coeff: val}) for val in trace["x"][:samples]])
        assert post_pred["obs"].shape == (samples, 3)
        npt.assert_allclose(post_pred["p"], expected_p)

        # fast version
        samples = 100
        with model:
            post_pred = pm.fast_sample_posterior_predictive(
                trace, samples=samples, var_names=["p", "obs"]
            )

        expected_p = np.array([logistic.eval({coeff: val}) for val in trace["x"][:samples]])
        assert post_pred["obs"].shape == (samples, 3)
        npt.assert_allclose(post_pred["p"], expected_p)
Example #15
    def test_deterministic_of_observed(self):
        meas_in_1 = pm.theanof.floatX(2 + 4 * np.random.randn(10))
        meas_in_2 = pm.theanof.floatX(5 + 4 * np.random.randn(10))
        nchains = 2
        with pm.Model() as model:
            mu_in_1 = pm.Normal("mu_in_1", 0, 1)
            sigma_in_1 = pm.HalfNormal("sd_in_1", 1)
            mu_in_2 = pm.Normal("mu_in_2", 0, 1)
            sigma_in_2 = pm.HalfNormal("sd__in_2", 1)

            in_1 = pm.Normal("in_1", mu_in_1, sigma_in_1, observed=meas_in_1)
            in_2 = pm.Normal("in_2", mu_in_2, sigma_in_2, observed=meas_in_2)
            out_diff = in_1 + in_2
            pm.Deterministic("out", out_diff)

            trace = pm.sample(100, chains=nchains)
            np.random.seed(0)
            with pytest.warns(DeprecationWarning):
                ppc = pm.sample_posterior_predictive(
                    model=model,
                    trace=trace,
                    samples=len(trace) * nchains,
                    vars=(model.deterministics + model.basic_RVs),
                )

            rtol = 1e-5 if theano.config.floatX == "float64" else 1e-4
            npt.assert_allclose(ppc["in_1"] + ppc["in_2"],
                                ppc["out"],
                                rtol=rtol)

            np.random.seed(0)
            ppc = pm.sample_posterior_predictive(
                model=model,
                trace=trace,
                samples=len(trace) * nchains,
                var_names=[
                    var.name
                    for var in (model.deterministics + model.basic_RVs)
                ],
            )

            npt.assert_allclose(ppc["in_1"] + ppc["in_2"],
                                ppc["out"],
                                rtol=rtol)

            np.random.seed(0)
            ppc = pm.fast_sample_posterior_predictive(
                model=model,
                trace=trace,
                samples=len(trace) * nchains,
                var_names=[
                    var.name
                    for var in (model.deterministics + model.basic_RVs)
                ],
            )

            npt.assert_allclose(ppc["in_1"] + ppc["in_2"],
                                ppc["out"],
                                rtol=rtol)
Example #16
    def test_sample(self):
        x = np.random.normal(size=100)
        y = x + np.random.normal(scale=1e-2, size=100)

        x_pred = np.linspace(-3, 3, 200, dtype='float32')

        with pm.Model():
            x_shared = pm.Data('x_shared', x)
            b = pm.Normal('b', 0., 10.)
            pm.Normal('obs', b * x_shared, np.sqrt(1e-2), observed=y)
            prior_trace0 = pm.sample_prior_predictive(1000)

            trace = pm.sample(1000, init=None, tune=1000, chains=1)
            pp_trace0 = pm.sample_posterior_predictive(trace, 1000)
            pp_trace01 = pm.fast_sample_posterior_predictive(trace, 1000)

            x_shared.set_value(x_pred)
            pp_trace1 = pm.sample_posterior_predictive(trace, samples=1000)
            pp_trace11 = pm.fast_sample_posterior_predictive(trace,
                                                             samples=1000)
            prior_trace1 = pm.sample_prior_predictive(1000)

        assert prior_trace0['b'].shape == (1000, )
        assert prior_trace0['obs'].shape == (1000, 100)
        assert prior_trace1['obs'].shape == (1000, 200)

        assert pp_trace0['obs'].shape == (1000, 100)
        assert pp_trace01['obs'].shape == (1000, 100)

        np.testing.assert_allclose(x, pp_trace0['obs'].mean(axis=0), atol=1e-1)
        np.testing.assert_allclose(x,
                                   pp_trace01['obs'].mean(axis=0),
                                   atol=1e-1)

        assert pp_trace1['obs'].shape == (1000, 200)
        assert pp_trace11['obs'].shape == (1000, 200)

        np.testing.assert_allclose(x_pred,
                                   pp_trace1['obs'].mean(axis=0),
                                   atol=1e-1)
        np.testing.assert_allclose(x_pred,
                                   pp_trace11['obs'].mean(axis=0),
                                   atol=1e-1)
Example #17
def test_mixture_random_shape_fast():
    # test the shape broadcasting in mixture random
    y = np.concatenate([nr.poisson(5, size=10),
                        nr.poisson(9, size=10)])
    with pm.Model() as m:
        comp0 = pm.Poisson.dist(mu=np.ones(2))
        w0 = pm.Dirichlet('w0', a=np.ones(2))
        like0 = pm.Mixture('like0',
                           w=w0,
                           comp_dists=comp0,
                           observed=y)

        comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
                                shape=(20, 2))
        w1 = pm.Dirichlet('w1', a=np.ones(2))
        like1 = pm.Mixture('like1',
                           w=w1,
                           comp_dists=comp1,
                           observed=y)

        comp2 = pm.Poisson.dist(mu=np.ones(2))
        w2 = pm.Dirichlet('w2',
                          a=np.ones(2),
                          shape=(20, 2))
        like2 = pm.Mixture('like2',
                           w=w2,
                           comp_dists=comp2,
                           observed=y)

        comp3 = pm.Poisson.dist(mu=np.ones(2),
                                shape=(20, 2))
        w3 = pm.Dirichlet('w3',
                          a=np.ones(2),
                          shape=(20, 2))
        like3 = pm.Mixture('like3',
                           w=w3,
                           comp_dists=comp3,
                           observed=y)

    rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],
                                             point=m.test_point,
                                             size=100)
    assert rand0.shape == (100, 20)
    assert rand1.shape == (100, 20)
    assert rand2.shape == (100, 20)
    assert rand3.shape == (100, 20)

    # I *think* that the mixture means that this is not going to work,
    # but I could be wrong. [2019/08/22:rpg]
    with m:
        ppc = pm.fast_sample_posterior_predictive([m.test_point], samples=200)
    assert ppc['like0'].shape == (200, 20)
    assert ppc['like1'].shape == (200, 20)
    assert ppc['like2'].shape == (200, 20)
    assert ppc['like3'].shape == (200, 20)
Example #18
    def forecast_election(self,
                          idata: arviz.InferenceData) -> arviz.InferenceData:
        """
        Generate out-of-sample predictions for ``election_to_predict`` specified in ``__init__``.

        Parameters
        ----------
        idata: arviz.InferenceData
            Posterior trace generated by ``self.sample_all`` on the training dataset.
            The dataset used for predictions is generated automatically: one observation for each
            of the days in ``self.coords["countdown"]``. The corresponding values of predictors are
            handled automatically.
        """
        new_dates, oos_data = self._generate_oos_data(idata)
        oos_data = self._join_with_continuous_predictors(oos_data)
        forecast_data_index = pd.DataFrame(
            data=0,  # just a placeholder
            index=pd.MultiIndex.from_frame(oos_data),
            columns=self.parties_complete,
        )
        forecast_data = forecast_data_index.reset_index()

        PREDICTION_COORDS = {"observations": new_dates}
        PREDICTION_DIMS = {
            "latent_popularity": ["observations", "parties_complete"],
            "noisy_popularity": ["observations", "parties_complete"],
            "N_approve": ["observations", "parties_complete"],
        }

        forecast_model = self.build_model(
            polls=forecast_data,
            continuous_predictors=forecast_data,
        )
        with forecast_model:
            ppc = pm.fast_sample_posterior_predictive(
                idata,
                var_names=[
                    "party_intercept",
                    "latent_popularity",
                    "noisy_popularity",
                    "N_approve",
                    "latent_pop_t0",
                    "R",
                ],
            )
            ppc = arviz.from_pymc3_predictions(
                ppc,
                idata_orig=idata,
                inplace=False,
                coords=PREDICTION_COORDS,
                dims=PREDICTION_DIMS,
            )

        return ppc
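Chaining this with sample_all from Example #11 (same class), the out-of-sample workflow would look roughly like the following; model here is a hypothetical instance and the variable names are assumptions:

idata = model.sample_all(var_names=["latent_popularity", "noisy_popularity", "N_approve"])
predictions = model.forecast_election(idata)  # arviz.InferenceData with a "predictions" group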
Example #19
    def test_density_dist_with_random_sampleable_failure(self, shape):
        with pm.Model() as model:
            mu = pm.Normal('mu', 0, 1)
            normal_dist = pm.Normal.dist(mu, 1, shape=shape)
            pm.DensityDist(
                'density_dist',
                normal_dist.logp,
                observed=np.random.randn(100, *shape),
                shape=shape,
                random=normal_dist.random,
                wrap_random_with_dist_shape=False
            )
            trace = pm.sample(100)

        samples = 500
        with pytest.raises(RuntimeError):
            pm.sample_posterior_predictive(trace, samples=samples, model=model, size=100)

        with pytest.raises((TypeError, RuntimeError)):
            pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=100)
Example #20
    def sample_mod(
        self, 
        posterior_draws = 2000, # this is not enough
        post_pred_draws = 1000,
        prior_pred_draws = 1000,
        random_seed = 42,
        chains = 2):
        """Sample the posterior, the posterior predictive and the prior predictive distribution.

        Args:
            posterior_draws (int, optional): Number of draws for the posterior. Defaults to 2000.
            prior_pred_draws (int, optional): Number of draws for the prior predictive distribution. Defaults to 1000.
            post_pred_draws (int, optional): Number of draws from the posterior predictive distribution. Defaults to 1000.
            random_seed (int, optional): Random seed for ensuring reproducibility. Defaults to 42.
            chains (int, optional): Number of chains used for sampling the posterior. Defaults to 2.

        Example:
            Pc.sample_mod(posterior_draws = 3000, post_pred_draws = 1500, prior_pred_draws = 55, random_seed = 13, chains = 4)
        """        

        # we need these for later
        self.posterior_draws = posterior_draws
        self.post_pred_draws = post_pred_draws
        self.prior_pred_draws = prior_pred_draws
        
        with self.model:
            self.trace = pm.sample(
                return_inferencedata = False,
                draws = posterior_draws,
                target_accept = .99,
                random_seed = random_seed,
                chains = chains)  # hard set to 42
            self.post_pred = pm.sample_posterior_predictive(self.trace, samples = post_pred_draws)
            self.prior_pred = pm.sample_prior_predictive(samples = prior_pred_draws)
            self.m_idata = az.from_pymc3(trace = self.trace, posterior_predictive=self.post_pred, prior=self.prior_pred)

        with self.model:
            pm.set_data({"t1_shared": self.t1_test})
            pm.set_data({"t2_shared": self.t2_test})
            pm.set_data({"idx_shared": self.idx_test})
            pm.set_data({"t3_shared": np.array(self.t3_test)})
            predictions = pm.fast_sample_posterior_predictive(
                self.m_idata.posterior
            )
            az.from_pymc3_predictions(
                predictions, 
                idata_orig = self.m_idata,
                coords = {'idx': self.test[self.index].values},
                inplace = True)
Example #21
    def test_multivariate2(self):
        # Added test for issue #3271
        mn_data = np.random.multinomial(n=100, pvals=[1 / 6.0] * 6, size=10)
        with pm.Model() as dm_model:
            probs = pm.Dirichlet("probs", a=np.ones(6), shape=6)
            obs = pm.Multinomial("obs", n=100, p=probs, observed=mn_data)
            burned_trace = pm.sample(20, tune=10, cores=1)
        sim_priors = pm.sample_prior_predictive(samples=20, model=dm_model)
        sim_ppc = pm.sample_posterior_predictive(burned_trace, samples=20, model=dm_model)
        assert sim_priors["probs"].shape == (20, 6)
        assert sim_priors["obs"].shape == (20,) + obs.distribution.shape
        assert sim_ppc["obs"].shape == (20,) + obs.distribution.shape

        sim_ppc = pm.fast_sample_posterior_predictive(burned_trace, samples=20, model=dm_model)
        assert sim_ppc["obs"].shape == (20,) + obs.distribution.shape
Example #22
    def predict(self):  ## make this work for only one.

        with self.model:
            pm.set_data({"t1_shared": self.t1_test})
            pm.set_data({"t2_shared": self.t2_test})
            pm.set_data({"idx_shared": self.idx_test})
            pm.set_data({"t3_shared": np.array(self.t3_test)})
            predictions = pm.fast_sample_posterior_predictive(
                self.m_idata.posterior
            )
            az.from_pymc3_predictions(
                predictions,
                idata_orig = self.m_idata,
                coords = {'idx': self.test[self.index].values},
                inplace = True)
Example #23
    def test_model_not_drawable_prior(self):
        data = np.random.poisson(lam=10, size=200)
        model = pm.Model()
        with model:
            mu = pm.HalfFlat("sigma")
            pm.Poisson("foo", mu=mu, observed=data)
            trace = pm.sample(tune=1000)

        with model:
            with pytest.raises(ValueError) as excinfo:
                pm.sample_prior_predictive(50)
            assert "Cannot sample" in str(excinfo.value)
            samples = pm.sample_posterior_predictive(trace, 40)
            assert samples["foo"].shape == (40, 200)

            samples = pm.fast_sample_posterior_predictive(trace, 40)
            assert samples["foo"].shape == (40, 200)
Example #24
    def sample_posterior_predictive(self, kind='pymc', fast=False, **kwargs):
        """ Sample posterior predictive for 
            base class self.sample_posterior_predictive_kws or passed kwargs 
            
            Option to use pm.fast_sample_posterior_predictive()
        """
        random_seed = kwargs.pop('random_seed', self.sample_kws['random_seed'])

        if self.model is None:
            self.build()

        with self.model:
            if fast:
                self._posterior_predictive = pm.fast_sample_posterior_predictive(
                    self.trace, random_seed=random_seed, **kwargs)
            else:
                self._posterior_predictive = pm.sample_posterior_predictive(
                    self.trace, random_seed=random_seed, **kwargs)
        return None
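A usage sketch for this wrapper method, assuming a subclass instance that has already defined self.model (or a build() method), self.trace, and self.sample_kws; the names outside this snippet are hypothetical:

wrapper = MyModelWrapper()  # hypothetical subclass
wrapper.sample_posterior_predictive(fast=True, var_names=["obs"])
ppc = wrapper._posterior_predictive  # results are stored on the instance; the method returns None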
Example #25
    def test_density_dist_with_random_sampleable_handcrafted_success_fast(self):
        with pm.Model() as model:
            mu = pm.Normal('mu', 0, 1)
            normal_dist = pm.Normal.dist(mu, 1)
            rvs = pm.Normal.dist(mu, 1, shape=100).random
            obs = pm.DensityDist(
                'density_dist',
                normal_dist.logp,
                observed=np.random.randn(100),
                random=rvs,
                wrap_random_with_dist_shape=False
            )
            trace = pm.sample(100)

        samples = 500
        size = 100

        ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=size)
        assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape
Example #26
    def test_sample_posterior_predictive_after_set_data(self):
        with pm.Model() as model:
            x = pm.Data('x', [1., 2., 3.])
            y = pm.Data('y', [1., 2., 3.])
            beta = pm.Normal('beta', 0, 10.)
            pm.Normal('obs', beta * x, np.sqrt(1e-2), observed=y)
            trace = pm.sample(1000, tune=1000, chains=1)
        # Predict on new data.
        with model:
            x_test = [5, 6, 9]
            pm.set_data(new_data={'x': x_test})
            y_test = pm.sample_posterior_predictive(trace)
            y_test1 = pm.fast_sample_posterior_predictive(trace)

        assert y_test['obs'].shape == (1000, 3)
        assert y_test1['obs'].shape == (1000, 3)
        np.testing.assert_allclose(x_test, y_test['obs'].mean(axis=0),
                                   atol=1e-1)
        np.testing.assert_allclose(x_test, y_test1['obs'].mean(axis=0),
                                   atol=1e-1)
Example #27
    def test_sample_posterior_predictive_after_set_data(self):
        with pm.Model() as model:
            x = pm.Data("x", [1.0, 2.0, 3.0])
            y = pm.Data("y", [1.0, 2.0, 3.0])
            beta = pm.Normal("beta", 0, 10.0)
            pm.Normal("obs", beta * x, np.sqrt(1e-2), observed=y)
            trace = pm.sample(1000, tune=1000, chains=1)
        # Predict on new data.
        with model:
            x_test = [5, 6, 9]
            pm.set_data(new_data={"x": x_test})
            y_test = pm.sample_posterior_predictive(trace)
            y_test1 = pm.fast_sample_posterior_predictive(trace)

        assert y_test["obs"].shape == (1000, 3)
        assert y_test1["obs"].shape == (1000, 3)
        np.testing.assert_allclose(x_test,
                                   y_test["obs"].mean(axis=0),
                                   atol=1e-1)
        np.testing.assert_allclose(x_test,
                                   y_test1["obs"].mean(axis=0),
                                   atol=1e-1)
Example #28
    def test_sample_after_set_data(self):
        with pm.Model() as model:
            x = pm.Data('x', [1., 2., 3.])
            y = pm.Data('y', [1., 2., 3.])
            beta = pm.Normal('beta', 0, 10.)
            pm.Normal('obs', beta * x, np.sqrt(1e-2), observed=y)
            pm.sample(1000, init=None, tune=1000, chains=1)
        # Predict on new data.
        new_x = [5., 6., 9.]
        new_y = [5., 6., 9.]
        with model:
            pm.set_data(new_data={'x': new_x, 'y': new_y})
            new_trace = pm.sample(1000, init=None, tune=1000, chains=1)
            pp_trace = pm.sample_posterior_predictive(new_trace, 1000)
            pp_tracef = pm.fast_sample_posterior_predictive(new_trace, 1000)

        assert pp_trace['obs'].shape == (1000, 3)
        assert pp_tracef['obs'].shape == (1000, 3)
        np.testing.assert_allclose(new_y, pp_trace['obs'].mean(axis=0),
                                   atol=1e-1)
        np.testing.assert_allclose(new_y, pp_tracef['obs'].mean(axis=0),
                                   atol=1e-1)
Example #29
    def test_density_dist_with_random_sampleable_hidden_error(self, shape):
        with pm.Model() as model:
            mu = pm.Normal('mu', 0, 1)
            normal_dist = pm.Normal.dist(mu, 1, shape=shape)
            obs = pm.DensityDist(
                'density_dist',
                normal_dist.logp,
                observed=np.random.randn(100, *shape),
                shape=shape,
                random=normal_dist.random,
                wrap_random_with_dist_shape=False,
                check_shape_in_random=False
            )
            trace = pm.sample(100)

        samples = 500
        ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model)
        assert len(ppc['density_dist']) == samples
        assert ((samples,) + obs.distribution.shape) != ppc['density_dist'].shape

        ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model)
        assert len(ppc['density_dist']) == samples
        assert ((samples,) + obs.distribution.shape) != ppc['density_dist'].shape
Example #30
    def test_sample_after_set_data(self):
        with pm.Model() as model:
            x = pm.Data("x", [1.0, 2.0, 3.0])
            y = pm.Data("y", [1.0, 2.0, 3.0])
            beta = pm.Normal("beta", 0, 10.0)
            pm.Normal("obs", beta * x, np.sqrt(1e-2), observed=y)
            pm.sample(1000, init=None, tune=1000, chains=1)
        # Predict on new data.
        new_x = [5.0, 6.0, 9.0]
        new_y = [5.0, 6.0, 9.0]
        with model:
            pm.set_data(new_data={"x": new_x, "y": new_y})
            new_trace = pm.sample(1000, init=None, tune=1000, chains=1)
            pp_trace = pm.sample_posterior_predictive(new_trace, 1000)
            pp_tracef = pm.fast_sample_posterior_predictive(new_trace, 1000)

        assert pp_trace["obs"].shape == (1000, 3)
        assert pp_tracef["obs"].shape == (1000, 3)
        np.testing.assert_allclose(new_y,
                                   pp_trace["obs"].mean(axis=0),
                                   atol=1e-1)
        np.testing.assert_allclose(new_y,
                                   pp_tracef["obs"].mean(axis=0),
                                   atol=1e-1)