Example #1
    def test_deprecated_parallel_arg(self):
        with self.fast_model:
            with pytest.warns(
                FutureWarning,
                match="The argument parallel is deprecated",
            ):
                pm.sample_smc(draws=10, chains=1, parallel=False)
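Most of these tests run against fixtures created once on the test class. Judging from Example #14 below, which builds the same model inline, `self.fast_model` is plausibly a tiny two-variable model along these lines (a sketch for context, not the exact fixture):

    import pymc as pm

    # A deliberately small model, so the SMC tests run quickly.
    with pm.Model() as fast_model:
        x = pm.Normal("x", 0, 1)
        y = pm.Normal("y", x, 1, observed=0)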
Example #2
    def test_convergence_checks(self):
        with self.fast_model:
            with pytest.warns(
                UserWarning,
                match="The number of samples is too small",
            ):
                pm.sample_smc(draws=99)
Example #3
    def test_kernel_kwargs(self):
        with self.fast_model:
            trace = pm.sample_smc(
                draws=10,
                chains=1,
                threshold=0.7,
                correlation_threshold=0.02,
                return_inferencedata=False,
                kernel=pm.smc.IMH,
            )

            assert trace.report.threshold == 0.7
            assert trace.report.n_draws == 10
            assert trace.report.correlation_threshold == 0.02

        with self.fast_model:
            trace = pm.sample_smc(
                draws=10,
                chains=1,
                threshold=0.95,
                correlation_threshold=0.02,
                return_inferencedata=False,
                kernel=pm.smc.MH,
            )

            assert trace.report.threshold == 0.95
            assert trace.report.n_draws == 10
            assert trace.report.correlation_threshold == 0.02
Example #4
    def test_deprecated_abc_args(self):
        with self.fast_model:
            with pytest.warns(
                FutureWarning,
                match='The kernel string argument "ABC" in sample_smc has been deprecated',
            ):
                pm.sample_smc(draws=10, chains=1, kernel="ABC")

            with pytest.warns(
                FutureWarning,
                match='The kernel string argument "Metropolis" in sample_smc has been deprecated',
            ):
                pm.sample_smc(draws=10, chains=1, kernel="Metropolis")

            with pytest.warns(
                FutureWarning,
                match="save_sim_data has been deprecated",
            ):
                pm.sample_smc(draws=10, chains=1, save_sim_data=True)

            with pytest.warns(
                FutureWarning,
                match="save_log_pseudolikelihood has been deprecated",
            ):
                pm.sample_smc(draws=10, chains=1, save_log_pseudolikelihood=True)
Example #5
    def test_return_datatype(self, chains):
        draws = 10

        with self.fast_model:
            idata = pm.sample_smc(chains=chains, draws=draws)
            mt = pm.sample_smc(chains=chains, draws=draws, return_inferencedata=False)

        assert isinstance(idata, InferenceData)
        assert "sample_stats" in idata
        assert idata.posterior.dims["chain"] == chains
        assert idata.posterior.dims["draw"] == draws

        assert isinstance(mt, MultiTrace)
        assert mt.nchains == chains
        assert mt["x"].size == chains * draws
Example #6
    def test_nested_simulators(self):
        true_a = 2
        rng = self.get_random_state()
        data = rng.normal(true_a, 0.1, size=1000)

        with pm.Model() as m:
            sim1 = pm.Simulator(
                "sim1",
                self.normal_sim,
                params=(0, 4),
                distance="gaussian",
                sum_stat="identity",
            )
            sim2 = pm.Simulator(
                "sim2",
                self.normal_sim,
                params=(sim1, 0.1),
                distance="gaussian",
                sum_stat="mean",
                epsilon=0.1,
                observed=data,
            )

        assert self.count_rvs(m.logpt) == 2

        with m:
            trace = pm.sample_smc(return_inferencedata=False)

        assert np.abs(true_a - trace["sim1"].mean()) < 0.1
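Several examples reference helpers defined on the test class. Assuming PyMC's `fn(rng, *params, size)` simulator convention and an Aesara-based graph (as in Example #12), `normal_sim` and `count_rvs` are plausibly along these lines (a sketch, not the exact helpers):

    from aesara.graph.basic import ancestors
    from aesara.tensor.random.op import RandomVariable

    def normal_sim(rng, a, b, size):
        # Draw pseudo-data from the proposed parameters; pm.Simulator
        # compares these draws against the observed data via `distance`.
        return rng.normal(a, b, size=size)

    def count_rvs(logp):
        # Count RandomVariable nodes among the ancestors of the logp graph.
        return len(
            [
                node
                for node in ancestors([logp])
                if node.owner is not None and isinstance(node.owner.op, RandomVariable)
            ]
        )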
Example #7
    def test_name_is_string_type(self):
        with self.SMABC_potential:
            assert not self.SMABC_potential.name
            trace = pm.sample_smc(draws=10,
                                  chains=1,
                                  return_inferencedata=False)
            assert isinstance(trace._straces[0].name, str)
Example #8
    def test_unobserved_categorical(self):
        with pm.Model() as m:
            mu = pm.Categorical("mu", p=[0.1, 0.3, 0.6], size=2)
            pm.Normal("like", mu=mu, sigma=0.1, observed=[1, 2])

            trace = pm.sample_smc(chains=1, return_inferencedata=False)

        assert np.all(np.median(trace["mu"], axis=0) == [1, 2])
Example #9
    def test_sample(self):
        with self.SMC_test:
            mtrace = pm.sample_smc(draws=self.samples,
                                   return_inferencedata=False)

        x = mtrace["X"]
        mu1d = np.abs(x).mean(axis=0)
        np.testing.assert_allclose(self.muref, mu1d, rtol=0.0, atol=0.03)
Example #10
    def test_model_with_potential(self):
        assert self.count_rvs(self.SMABC_potential.logpt) == 1

        with self.SMABC_potential:
            trace = pm.sample_smc(draws=100,
                                  chains=1,
                                  return_inferencedata=False)
            assert np.all(trace["a"] >= 0)
Example #11
    def test_custom_dist_sum_stat(self):
        with pm.Model() as m:
            a = pm.Normal("a", mu=0, sigma=1)
            b = pm.HalfNormal("b", sigma=1)
            s = pm.Simulator(
                "s",
                self.normal_sim,
                a,
                b,
                distance=self.abs_diff,
                sum_stat=self.quantiles,
                observed=self.data,
            )

        assert self.count_rvs(m.logpt) == 1

        with m:
            pm.sample_smc(draws=100)
Example #12
    def test_multiple_simulators(self):
        true_a = 2
        true_b = -2

        data1 = np.random.normal(true_a, 0.1, size=1000)
        data2 = np.random.normal(true_b, 0.1, size=1000)

        with pm.Model() as m:
            a = pm.Normal("a", mu=0, sigma=3)
            b = pm.Normal("b", mu=0, sigma=3)
            sim1 = pm.Simulator(
                "sim1",
                self.normal_sim,
                a,
                0.1,
                distance="gaussian",
                sum_stat="sort",
                observed=data1,
            )
            sim2 = pm.Simulator(
                "sim2",
                self.normal_sim,
                b,
                0.1,
                distance="laplace",
                sum_stat="mean",
                epsilon=0.1,
                observed=data2,
            )

        assert self.count_rvs(m.logpt()) == 2

        # Check that the logps use the correct methods
        a_val = m.rvs_to_values[a]
        sim1_val = m.rvs_to_values[sim1]
        logp_sim1 = pm.joint_logpt(sim1, sim1_val)
        logp_sim1_fn = aesara.function([a_val], logp_sim1)

        b_val = m.rvs_to_values[b]
        sim2_val = m.rvs_to_values[sim2]
        logp_sim2 = pm.joint_logpt(sim2, sim2_val)
        logp_sim2_fn = aesara.function([b_val], logp_sim2)

        # sim1 uses sum_stat="sort", so its logp graph should contain a SortOp,
        # while sim2 (sum_stat="mean") should not.
        assert any(
            node for node in logp_sim1_fn.maker.fgraph.toposort() if isinstance(node.op, SortOp)
        )

        assert not any(
            node for node in logp_sim2_fn.maker.fgraph.toposort() if isinstance(node.op, SortOp)
        )

        with m:
            trace = pm.sample_smc(return_inferencedata=False)

        assert abs(true_a - trace["a"].mean()) < 0.05
        assert abs(true_b - trace["b"].mean()) < 0.05
Example #13
    def test_start(self):
        with pm.Model() as model:
            a = pm.Poisson("a", 5)
            b = pm.HalfNormal("b", 10)
            y = pm.Normal("y", a, b, observed=[1, 2, 3, 4])
            # `start` maps free-variable names (including transformed names
            # such as "b_log__") to one initial value per SMC draw.
            start = {
                "a": np.random.poisson(5, size=500),
                "b_log__": np.abs(np.random.normal(0, 10, size=500)),
            }
            trace = pm.sample_smc(500, chains=1, start=start)
Example #14
    def test_proposal_dist_shape(self):
        with pm.Model() as m:
            x = pm.Normal("x", 0, 1)
            y = pm.Normal("y", x, 1, observed=0)
            trace = pm.sample_smc(
                draws=10,
                chains=1,
                kernel=pm.smc.MH,
                return_inferencedata=False,
            )
Example #15
    def test_sample(self):
        initial_rng_state = np.random.get_state()
        with self.SMC_test:
            mtrace = pm.sample_smc(draws=self.samples,
                                   return_inferencedata=False)
        assert_random_state_equal(initial_rng_state, np.random.get_state())

        x = mtrace["X"]
        mu1d = np.abs(x).mean(axis=0)
        np.testing.assert_allclose(self.muref, mu1d, rtol=0.0, atol=0.03)
Example #16
    def test_normal_model(self):
        data = st.norm(10, 0.5).rvs(1000, random_state=self.get_random_state())
        with pm.Model() as m:
            mu = pm.Normal("mu", 0, 3)
            sigma = pm.HalfNormal("sigma", 1)
            y = pm.Normal("y", mu, sigma, observed=data)
            idata = pm.sample_smc(draws=2000, kernel=pm.smc.MH)

        post = idata.posterior.stack(sample=("chain", "draw"))
        assert np.abs(post["mu"].mean() - 10) < 0.1
        assert np.abs(post["sigma"].mean() - 0.5) < 0.05
Example #17
    def test_named_model(self):
        # Named models used to fail with Simulator because the arguments to the
        # random fn were passed by name, which is no longer the case.
        # https://github.com/pymc-devs/pymc/pull/4365#issuecomment-761221146
        name = "NamedModel"
        with pm.Model(name=name):
            a = pm.Normal("a", mu=0, sigma=1)
            b = pm.HalfNormal("b", sigma=1)
            s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)

            trace = pm.sample_smc(draws=10, chains=2, return_inferencedata=False)
            assert f"{name}/a" in trace.varnames
            assert f"{name}/b" in trace.varnames
            assert f"{name}/b_log__" in trace.varnames
Example #18
    def test_unobserved_bernoulli(self):
        n = 10
        rng = self.get_random_state()
        z_true = np.zeros(n, dtype=int)
        z_true[n // 2:] = 1
        y = st.norm(np.array([-1, 1])[z_true], 0.25).rvs(random_state=rng)

        with pm.Model() as m:
            z = pm.Bernoulli("z", p=0.5, size=n)
            mu = pm.math.switch(z, 1.0, -1.0)
            like = pm.Normal("like", mu=mu, sigma=0.25, observed=y)

            trace = pm.sample_smc(chains=1, return_inferencedata=False)

        assert np.all(np.median(trace["z"], axis=0) == z_true)
Example #19
    def test_kernel_kwargs(self):
        with self.fast_model:
            trace = pm.sample_smc(
                draws=10,
                chains=1,
                threshold=0.7,
                n_steps=15,
                tune_steps=False,
                p_acc_rate=0.5,
                return_inferencedata=False,
                kernel=pm.smc.IMH,
            )

            assert trace.report.threshold == 0.7
            assert trace.report.n_draws == 10
            assert trace.report.n_tune == 15
            assert trace.report.tune_steps is False
            assert trace.report.p_acc_rate == 0.5

        with self.fast_model:
            trace = pm.sample_smc(
                draws=10,
                chains=1,
                threshold=0.95,
                n_steps=15,
                tune_steps=False,
                p_acc_rate=0.5,
                return_inferencedata=False,
                kernel=pm.smc.MH,
            )

            assert trace.report.threshold == 0.95
            assert trace.report.n_draws == 10
            assert trace.report.n_tune == 15
            assert trace.report.tune_steps is False
            assert trace.report.p_acc_rate == 0.5
Example #20
    def test_marginal_likelihood(self):
        data = np.repeat([1, 0], [50, 50])
        marginals = []
        a_prior_0, b_prior_0 = 1.0, 1.0
        a_prior_1, b_prior_1 = 20.0, 20.0

        for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):
            with pm.Model() as model:
                a = pm.Beta("a", alpha, beta)
                y = pm.Bernoulli("y", a, observed=data)
                trace = pm.sample_smc(2000, return_inferencedata=False)
                marginals.append(trace.report.log_marginal_likelihood)
        # compare to the analytical result
        assert abs(np.exp(np.nanmean(marginals[1]) - np.nanmean(marginals[0])) - 4.0) <= 1
Example #21
    def test_one_gaussian(self):
        assert self.count_rvs(self.SMABC_test.logpt) == 1

        with self.SMABC_test:
            trace = pm.sample_smc(draws=1000, return_inferencedata=False)
            pr_p = pm.sample_prior_predictive(1000, return_inferencedata=False)
            po_p = pm.sample_posterior_predictive(trace,
                                                  1000,
                                                  return_inferencedata=False)

        assert abs(self.data.mean() - trace["a"].mean()) < 0.05
        assert abs(self.data.std() - trace["b"].mean()) < 0.05

        assert pr_p["s"].shape == (1000, 1000)
        assert abs(0 - pr_p["s"].mean()) < 0.10
        assert abs(1.4 - pr_p["s"].std()) < 0.10

        assert po_p["s"].shape == (1000, 1000)
        assert abs(self.data.mean() - po_p["s"].mean()) < 0.10
        assert abs(self.data.std() - po_p["s"].std()) < 0.10
Example #22
    def test_marginal_likelihood(self):
        """
        Verifies that the log marginal likelihood function
        can be correctly computed for a Beta-Bernoulli model.
        """
        data = np.repeat([1, 0], [50, 50])
        marginals = []
        a_prior_0, b_prior_0 = 1.0, 1.0
        a_prior_1, b_prior_1 = 20.0, 20.0

        for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):
            with pm.Model() as model:
                a = pm.Beta("a", alpha, beta)
                y = pm.Bernoulli("y", a, observed=data)
                trace = pm.sample_smc(2000, chains=2, return_inferencedata=False)
            # log_marginal_likelihood is found in the last value of each chain
            lml = np.mean([chain[-1] for chain in trace.report.log_marginal_likelihood])
            marginals.append(lml)

        # compare to the analytical result
        assert abs(np.exp(marginals[1] - marginals[0]) - 4.0) <= 1
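The "analytical result" that both marginal-likelihood tests compare against follows from the closed-form Beta-Bernoulli evidence: with h successes in n trials and a Beta(alpha, beta) prior, the marginal likelihood is B(alpha + h, beta + n - h) / B(alpha, beta). A quick standalone check of the expected Bayes factor (a sketch, not part of the test suite):

    import numpy as np
    from scipy.special import betaln

    h, n = 50, 100  # the data: 50 ones and 50 zeros
    log_ml_0 = betaln(1.0 + h, 1.0 + n - h) - betaln(1.0, 1.0)      # Beta(1, 1) prior
    log_ml_1 = betaln(20.0 + h, 20.0 + n - h) - betaln(20.0, 20.0)  # Beta(20, 20) prior
    print(np.exp(log_ml_1 - log_ml_0))  # ~4.3, matching the asserted 4.0 +/- 1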