Example #1
 def test_multiple_samplers(self, caplog):
     with Model():
         prob = Beta("prob", alpha=5.0, beta=3.0)
         Binomial("outcome", n=1, p=prob)
         caplog.clear()
         sample(3, tune=2, discard_tuned_samples=False, n_init=None, chains=1)
         messages = [msg.msg for msg in caplog.records]
         assert all("boolean index did not" not in msg for msg in messages)
Example #2
 def test_multiple_samplers(self):
     with Model():
         prob = Beta('prob', alpha=5, beta=3)
         Binomial('outcome', n=1, p=prob)
         with warnings.catch_warnings(record=True) as warns:
             sample(3, tune=2, discard_tuned_samples=False, n_init=None)
         messages = [warn.message.args[0] for warn in warns]
         assert any("contains only 3" in msg for msg in messages)
         assert all('boolean index did not' not in msg for msg in messages)
Example #3
 def test_checks_population_size(self):
     """Test that population samplers check the population size."""
     with Model() as model:
         n = Normal('n', mu=0, sd=1)
         for stepper in TestPopulationSamplers.steppers:
             step = stepper()
             with pytest.raises(ValueError):
                 trace = sample(draws=100, chains=1, step=step)
             trace = sample(draws=100, chains=4, step=step)
     pass
Example #4
 def test_custom_proposal_dist(self):
     with Model() as pmodel:
         D = 3
         Normal('n', 0, 2, shape=(D, ))
         trace = sample(tune=100,
                        draws=50,
                        step=DEMetropolisZ(proposal_dist=NormalProposal),
                        cores=1,
                        chains=3,
                        discard_tuned_samples=False)
     pass
Example #5
    def test_float64(self):
        with Model() as model:
            x = Normal("x", testval=np.array(1.0, dtype="float64"))
            obs = Normal("obs", mu=x, sigma=1.0, observed=np.random.randn(5))

        assert x.dtype == "float64"
        assert obs.dtype == "float64"

        for sampler in self.samplers:
            with model:
                sample(10, sampler())
Example #6
 def test_checks_population_size(self):
     """Test that population samplers check the population size."""
     with Model() as model:
         n = Normal("n", mu=0, sigma=1)
         for stepper in TestPopulationSamplers.steppers:
             step = stepper()
             with pytest.raises(ValueError):
                 sample(draws=10, tune=10, chains=1, cores=1, step=step)
             # don't parallelize to make test faster
             sample(draws=10, tune=10, chains=4, cores=1, step=step)
     pass
Example #7
 def test_demcmc_warning_on_small_populations(self):
     """Test that a warning is raised when n_chains <= n_dims"""
     with Model() as model:
         Normal("n", mu=0, sigma=1, shape=(2,3))
         with pytest.warns(UserWarning) as record:
             sample(
                 draws=5, tune=5, chains=6, step=DEMetropolis(),
                 # make tests faster by not parallelizing; disable convergence warning
                 cores=1, compute_convergence_checks=False
             )
     pass
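Examples #3, #6, and #7 all exercise the same constraint: population samplers such as DEMetropolis build proposals from the other chains, so sampling with a single chain is rejected with a ValueError, and (per the docstring in Example #7) a UserWarning is raised whenever the number of chains does not exceed the number of model dimensions. A minimal sketch of the rule, with illustrative sizes that are not taken from the test suite:

with Model():
    Normal("n", mu=0, sigma=1, shape=(2, 3))  # 6 free dimensions
    # more chains than dimensions: no population-size warning expected
    sample(draws=10, tune=10, chains=8, cores=1, step=DEMetropolis())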
Example #8
    def test_float64(self):
        with Model() as model:
            x = Normal('x', testval=np.array(1., dtype='float64'))
            obs = Normal('obs', mu=x, sigma=1., observed=np.random.randn(5))

        assert x.dtype == 'float64'
        assert obs.dtype == 'float64'

        for sampler in self.samplers:
            with model:
                sample(10, sampler())
Example #9
def test_AR():
    # AR1
    data = np.array([0.3, 1, 2, 3, 4])
    phi = np.array([0.99])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z", mu=phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR1 and AR(1)
    with Model() as t:
        rho = Normal("rho", 0.0, 1.0)
        y1 = AR1("y1", rho, 1.0, observed=data)
        y2 = AR("y2", rho, 1.0, init=Normal.dist(0, 1), observed=data)
    np.testing.assert_allclose(y1.logp(t.test_point), y2.logp(t.test_point))

    # AR1 + constant
    with Model() as t:
        y = AR("y",
               np.hstack((0.3, phi)),
               sigma=1,
               shape=len(data),
               constant=True)
        z = Normal("z", mu=0.3 + phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR2
    phi = np.array([0.84, 0.10])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z",
                   mu=phi[0] * data[1:-1] + phi[1] * data[:-2],
                   sigma=1,
                   shape=len(data) - 2)
    ar_like = t["y"].logp({"z": data[2:], "y": data})
    reg_like = t["z"].logp({"z": data[2:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)
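test_AR checks the AR log-probability against an explicit Normal regression on lagged data. In the notation of the test (coefficients \phi, innovation scale \sigma = 1), the conditional likelihoods being matched are:

y_t \mid y_{t-1} \sim \mathcal{N}(\phi\, y_{t-1},\ \sigma^2) \quad\text{(AR1)}
y_t \mid y_{t-1} \sim \mathcal{N}(c + \phi\, y_{t-1},\ \sigma^2) \quad\text{(AR1 with constant)}
y_t \mid y_{t-1}, y_{t-2} \sim \mathcal{N}(\phi_1\, y_{t-1} + \phi_2\, y_{t-2},\ \sigma^2) \quad\text{(AR2)}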
Example #10
 def test_multiple_samplers(self):
     with Model():
         prob = Beta('prob', alpha=5., beta=3.)
         Binomial('outcome', n=1, p=prob)
         # Catching warnings through multiprocessing doesn't work,
         # so we have to use single threaded sampling.
         with pytest.warns(None) as warns:
             sample(3, tune=2, discard_tuned_samples=False,
                    n_init=None, chains=1)
         messages = [warn.message.args[0] for warn in warns]
         assert any("contains only 3" in msg for msg in messages)
         assert all('boolean index did not' not in msg for msg in messages)
Example #11
    def test_parallelized_chains_are_random(self):
        with Model() as model:
            x = Normal("x", 0, 1)
            for stepper in TestPopulationSamplers.steppers:
                step = stepper()
                trace = sample(chains=4, cores=4, draws=20, tune=0, step=step)
                samples = np.array(trace.get_values("x", combine=False))[:, 5]

                assert len(set(samples)) == 4, "Parallelized {} " "chains are identical.".format(
                    stepper
                )
        pass
Example #12
def test_interval_missing_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)

        rng = aesara.shared(np.random.RandomState(2323), borrow=True)

        with pytest.warns(ImputationWarning):
            theta1 = Uniform("theta1", 0, 5, observed=obs1, rng=rng)
        with pytest.warns(ImputationWarning):
            theta2 = Normal("theta2", mu=theta1, observed=obs2, rng=rng)

        assert "theta1_observed_interval__" in model.named_vars
        assert "theta1_missing_interval__" in model.named_vars
        assert isinstance(
            model.rvs_to_values[model.named_vars["theta1_observed"]].tag.transform, Interval
        )

        prior_trace = sample_prior_predictive()

        # Make sure the observed + missing combined deterministics have the
        # same shape as the original observations vectors
        assert prior_trace["theta1"].shape[-1] == obs1.shape[0]
        assert prior_trace["theta2"].shape[-1] == obs2.shape[0]

        # Make sure that the observed values are newly generated samples
        assert np.all(np.var(prior_trace["theta1_observed"], 0) > 0.0)
        assert np.all(np.var(prior_trace["theta2_observed"], 0) > 0.0)

        # Make sure the missing parts of the combined deterministic matches the
        # sampled missing and observed variable values
        assert np.mean(prior_trace["theta1"][:, obs1.mask] - prior_trace["theta1_missing"]) == 0.0
        assert np.mean(prior_trace["theta1"][:, ~obs1.mask] - prior_trace["theta1_observed"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, obs2.mask] - prior_trace["theta2_missing"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, ~obs2.mask] - prior_trace["theta2_observed"]) == 0.0

        assert {"theta1", "theta2"} <= set(prior_trace.keys())

        trace = sample(chains=1, draws=50, compute_convergence_checks=False)

        assert np.all(0 < trace["theta1_missing"].mean(0))
        assert np.all(0 < trace["theta2_missing"].mean(0))
        assert "theta1" not in trace.varnames
        assert "theta2" not in trace.varnames

        # Make sure that the observed values are newly generated samples and that
        # the observed values and the combined deterministic match
        pp_trace = sample_posterior_predictive(trace)
        assert np.all(np.var(pp_trace["theta1"], 0) > 0.0)
        assert np.all(np.var(pp_trace["theta2"], 0) > 0.0)
        assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0
        assert np.mean(pp_trace["theta2"][:, ~obs2.mask] - pp_trace["theta2_observed"]) == 0.0
Example #13
    def check_trace(self, step_method):
        """Tests whether the trace for step methods is exactly the same as on master.

        Code changes that affect how random numbers are drawn may change this, and require
        `master_samples` to be updated, but such changes should be noted and justified in the
        commit.

        This method may also be used to benchmark step methods across commits, by running, for
        example

        ```
        BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
        ```

        on multiple commits.
        """
        n_steps = 100
        with Model() as model:
            x = Normal('x', mu=0, sd=1)
            if step_method.__name__ == 'SMC':
                trace = smc.sample_smc(n_steps=n_steps,
                                       n_chains=2,
                                       start=[{
                                           'x': 1.
                                       }, {
                                           'x': -1.
                                       }],
                                       random_seed=1,
                                       n_jobs=1,
                                       progressbar=False,
                                       homepath=self.temp_dir)

            elif step_method.__name__ == 'NUTS':
                step = step_method(scaling=model.test_point)
                trace = sample(0,
                               tune=n_steps,
                               discard_tuned_samples=False,
                               step=step,
                               random_seed=1,
                               chains=1)
            else:
                trace = sample(0,
                               tune=n_steps,
                               discard_tuned_samples=False,
                               step=step_method(),
                               random_seed=1,
                               chains=1)
        assert_array_almost_equal(trace.get_values('x'),
                                  self.master_samples[step_method],
                                  decimal=select_by_precision(float64=6,
                                                              float32=4))
Example #14
    def check_trace(self, step_method):
        """Tests whether the trace for step methods is exactly the same as on master.

        Code changes that affect how random numbers are drawn may change this, and require
        `master_samples` to be updated, but such changes should be noted and justified in the
        commit.

        This method may also be used to benchmark step methods across commits, by running, for
        example

        ```
        BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
        ```

        on multiple commits.
        """
        n_steps = 100
        with Model() as model:
            x = Normal("x", mu=0, sd=1)
            y = Normal("y", mu=x, sd=1, observed=1)
            if step_method.__name__ == "SMC":
                trace = sample(draws=200,
                               random_seed=1,
                               progressbar=False,
                               step=step_method())
            elif step_method.__name__ == "NUTS":
                step = step_method(scaling=model.test_point)
                trace = sample(
                    0,
                    tune=n_steps,
                    discard_tuned_samples=False,
                    step=step,
                    random_seed=1,
                    chains=1,
                )
            else:
                trace = sample(
                    0,
                    tune=n_steps,
                    discard_tuned_samples=False,
                    step=step_method(),
                    random_seed=1,
                    chains=1,
                )

        assert_array_almost_equal(
            trace["x"],
            self.master_samples[step_method],
            decimal=select_by_precision(float64=6, float32=4),
        )
Example #15
    def test_float64(self):
        theano.config.floatX = 'float64'
        theano.config.warn_float64 = 'ignore'

        with Model() as model:
            x = Normal('x', testval=np.array(1., dtype='float64'))
            obs = Normal('obs', mu=x, sd=1., observed=np.random.randn(5))

        assert x.dtype == 'float64'
        assert obs.dtype == 'float64'

        for sampler in self.samplers:
            with model:
                sample(10, sampler())
Example #16
def test_GARCH11():
    # test data ~ N(0, 1)
    data = np.array(
        [
            -1.35078362,
            -0.81254164,
            0.28918551,
            -2.87043544,
            -0.94353337,
            0.83660719,
            -0.23336562,
            -0.58586298,
            -1.36856736,
            -1.60832975,
            -1.31403141,
            0.05446936,
            -0.97213128,
            -0.18928725,
            1.62011258,
            -0.95978616,
            -2.06536047,
            0.6556103,
            -0.27816645,
            -1.26413397,
        ]
    )
    omega = 0.6
    alpha_1 = 0.4
    beta_1 = 0.5
    initial_vol = np.float64(0.9)
    vol = np.empty_like(data)
    vol[0] = initial_vol
    for i in range(len(data) - 1):
        vol[i + 1] = np.sqrt(omega + beta_1 * vol[i] ** 2 + alpha_1 * data[i] ** 2)

    with Model() as t:
        y = GARCH11(
            "y",
            omega=omega,
            alpha_1=alpha_1,
            beta_1=beta_1,
            initial_vol=initial_vol,
            shape=data.shape,
        )
        z = Normal("z", mu=0, sigma=vol, shape=data.shape)
    garch_like = t["y"].logp({"z": data, "y": data})
    reg_like = t["z"].logp({"z": data, "y": data})
    decimal = select_by_precision(float64=7, float32=4)
    np.testing.assert_allclose(garch_like, reg_like, 10 ** (-decimal))
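The loop above is the standard GARCH(1,1) volatility recursion, so the GARCH11 log-probability can be compared against a plain Normal that uses the precomputed volatilities:

\sigma_{t+1}^2 = \omega + \alpha_1\, y_t^2 + \beta_1\, \sigma_t^2, \qquad y_t \sim \mathcal{N}(0,\ \sigma_t^2)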
Example #17
    def test_simple_init_layer(self):
        with Model():
            l = BLayer((10, 10))
            self.assertIsInstance(l.name, str)
            l = BLayer((10, 10), 'l2')
            self.assertEqual(len(l.vars), 1)
            self.assertIsInstance(l.name, str)
            l = BMLayer([(10, 10)], name='merge')
            self.assertIsInstance(l.name, str)
            self.assertEqual(len(l.vars), 1)
            l = BMLayer([(10, 10)])
            self.assertIsInstance(l.name, str)

        self.assertRaises(TypeError, BMLayer, (10, 10))
        self.assertRaises(TypeError, BLayer, (10, 10))
Example #18
def test_missing(data):

    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            y = Normal("y", x, 1, observed=data)

    assert "y_missing" in model.named_vars

    test_point = model.initial_point
    assert not np.isnan(model.logp(test_point))

    with model:
        prior_trace = sample_prior_predictive()
    assert {"x", "y"} <= set(prior_trace.keys())
Example #19
    def test_linalg(self):
        with Model():
            a = Normal('a', shape=2)
            a = tt.switch(a > 0, np.inf, a)
            b = tt.slinalg.solve(floatX(np.eye(2)), a)
            Normal('c', mu=b, shape=2)
            with warnings.catch_warnings(record=True) as warns:
                trace = sample(20, init=None, tune=5)
            assert np.any(trace['diverging'])
            assert any('diverging samples after tuning' in str(warn.message)
                       for warn in warns)
            assert any('contains only' in str(warn.message) for warn in warns)

            with pytest.raises(SamplingError):
                sample(20, init=None, nuts_kwargs={'on_error': 'raise'})
Example #20
 def test_tuning_none(self):
     with Model() as pmodel:
         Normal('n', 0, 2, shape=(3, ))
         trace = sample(tune=1000,
                        draws=500,
                        step=DEMetropolisZ(tune=None),
                        cores=1,
                        chains=2,
                        discard_tuned_samples=False)
     for c in range(trace.nchains):
         # check that all tunable parameters remained constant
         assert len(set(trace.get_sampler_stats('lambda', chains=c))) == 1
         assert len(set(trace.get_sampler_stats('scaling', chains=c))) == 1
         assert set(trace.get_sampler_stats('tune',
                                            chains=c)) == {True, False}
     pass
Example #21
def test_missing_dual_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)
        beta1 = Normal("beta1", 1, 1)
        beta2 = Normal("beta2", 2, 1)
        latent = Normal("theta", size=5)
        with pytest.warns(ImputationWarning):
            ovar1 = Normal("o1", mu=beta1 * latent, observed=obs1)
        with pytest.warns(ImputationWarning):
            ovar2 = Normal("o2", mu=beta2 * latent, observed=obs2)

        prior_trace = sample_prior_predictive()
        assert {"beta1", "beta2", "theta", "o1", "o2"} <= set(prior_trace.keys())
        # TODO: Assert something
        trace = sample(chains=1, draws=50)
Example #22
 def test_tuning_reset(self):
     """Re-use of the step method instance with cores=1 must not leak tuning information between chains."""
     with Model() as pmodel:
         D = 3
         Normal('n', 0, 2, shape=(D, ))
         trace = sample(tune=600,
                        draws=500,
                        step=Metropolis(tune=True, scaling=0.1),
                        cores=1,
                        chains=3,
                        discard_tuned_samples=False)
     for c in range(trace.nchains):
         # check that the tuned settings changed and were reset
         assert trace.get_sampler_stats('scaling', chains=c)[0] == 0.1
         assert trace.get_sampler_stats('scaling', chains=c)[-1] != 0.1
     pass
Example #23
 def test_tuning_lambda_sequential(self):
     with Model() as pmodel:
         Normal('n', 0, 2, shape=(3, ))
         trace = sample(tune=1000,
                        draws=500,
                        step=DEMetropolisZ(tune='lambda', lamb=0.92),
                        cores=1,
                        chains=3,
                        discard_tuned_samples=False)
     for c in range(trace.nchains):
         # check that the tuned settings changed and were reset
         assert trace.get_sampler_stats('lambda', chains=c)[0] == 0.92
         assert trace.get_sampler_stats('lambda', chains=c)[-1] != 0.92
         assert set(trace.get_sampler_stats('tune',
                                            chains=c)) == {True, False}
     pass
Example #24
 def test_tuning_epsilon_parallel(self):
     with Model() as pmodel:
         Normal('n', 0, 2, shape=(3, ))
         trace = sample(tune=1000,
                        draws=500,
                        step=DEMetropolisZ(tune='scaling', scaling=0.002),
                        cores=2,
                        chains=2,
                        discard_tuned_samples=False)
     for c in range(trace.nchains):
         # check that the tuned settings changed and were reset
         assert trace.get_sampler_stats('scaling', chains=c)[0] == 0.002
         assert trace.get_sampler_stats('scaling', chains=c)[-1] != 0.002
         assert set(trace.get_sampler_stats('tune',
                                            chains=c)) == {True, False}
     pass
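Examples #20, #23, and #24 cover the three valid `tune` settings of DEMetropolisZ side by side (same constructor as in the tests above):

DEMetropolisZ(tune=None)       # no tuning: 'lambda' and 'scaling' stats stay constant (Example #20)
DEMetropolisZ(tune='lambda')   # adapt the differential-evolution jump size lambda (Example #23)
DEMetropolisZ(tune='scaling')  # adapt the proposal scaling (Example #24)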
Example #25
def test_missing_with_predictors():
    predictors = array([0.5, 1, 0.5, 2, 0.3])
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            y = Normal("y", x * predictors, 1, observed=data)

    assert "y_missing" in model.named_vars

    test_point = model.initial_point
    assert not np.isnan(model.logp(test_point))

    with model:
        prior_trace = sample_prior_predictive()
    assert {"x", "y"} <= set(prior_trace.keys())
Example #26
    def test_demcmc_tune_parameter(self):
        """Tests that validity of the tune setting is checked"""
        with Model() as model:
            Normal("n", mu=0, sigma=1, shape=(2,3))
            
            step = DEMetropolis()
            assert step.tune is None

            step = DEMetropolis(tune='scaling')
            assert step.tune == 'scaling'

            step = DEMetropolis(tune='lambda')
            assert step.tune == 'lambda'

            with pytest.raises(ValueError):
                DEMetropolis(tune='foo')
        pass
Example #27
 def test_tune_drop_fraction(self):
     tune = 300
     tune_drop_fraction = 0.85
     draws = 200
     with Model() as pmodel:
         Normal('n', 0, 2, shape=(3, ))
         step = DEMetropolisZ(tune_drop_fraction=tune_drop_fraction)
         trace = sample(tune=tune,
                        draws=draws,
                        step=step,
                        cores=1,
                        chains=1,
                        discard_tuned_samples=False)
         assert len(trace) == tune + draws
         assert len(
             step._history) == (tune - tune * tune_drop_fraction) + draws
     pass
Example #28
    def test_posterior_estimate(self):
        alpha_true, sigma_true = 1., 0.5
        beta_true = 1.

        size = 1000

        X = np.random.randn(size)
        Y = alpha_true + beta_true * X + np.random.randn(size) * sigma_true

        decimal = 1
        with Model() as model:
            alpha = Normal('alpha', mu=0, sd=100, testval=alpha_true)
            beta = Normal('beta', mu=0, sd=100, testval=beta_true)
            sigma = InverseGamma('sigma', 10., testval=sigma_true)
            mu = alpha + beta * X
            Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)

            for step_method, params in (
                (NUTS, {"target_accept": 0.95}),
                (Slice, {}),
                (Metropolis, {'scaling': 10.}),
            ):
                trace = sample(100000,
                               step=step_method(**params),
                               progressbar=False,
                               tune=1000)
                trace_ = trace[-300::5]

                # Check that the posterior means recover the true parameter values.
                np.testing.assert_almost_equal(np.mean(trace_.alpha),
                                               alpha_true,
                                               decimal=decimal)
                np.testing.assert_almost_equal(np.mean(trace_.beta),
                                               beta_true,
                                               decimal=decimal)
                np.testing.assert_almost_equal(np.mean(trace_.sigma),
                                               sigma_true,
                                               decimal=decimal)

                # Make sure posteriors are normal
                _, p_alpha = stats.normaltest(trace_.alpha)
                _, p_beta = stats.normaltest(trace_.beta)
                # p-values should be > .05 to indicate normality
                np.testing.assert_array_less(0.05, p_alpha, verbose=True)
                np.testing.assert_array_less(0.05, p_beta, verbose=True)
Example #29
    def test_linalg(self, caplog):
        with Model():
            a = Normal('a', shape=2)
            a = tt.switch(a > 0, np.inf, a)
            b = tt.slinalg.solve(floatX(np.eye(2)), a)
            Normal('c', mu=b, shape=2)
            caplog.clear()
            trace = sample(20, init=None, tune=5, chains=2)
            warns = [msg.msg for msg in caplog.records]
            assert np.any(trace['diverging'])
            assert (any('divergences after tuning' in warn for warn in warns)
                    or any('only diverging samples' in warn for warn in warns))

            with pytest.raises(ValueError) as error:
                trace.report.raise_ok()
            error.match('issues during sampling')

            assert not trace.report.ok
Example #30
    def test_normal_nograd_op(self):
        """Test normal distribution without an implemented gradient is assigned slice method"""
        with Model() as model:
            x = Normal('x', 0, 1)

            # a custom Theano Op that does not have a grad:
            is_64 = theano.config.floatX == "float64"
            itypes = [tt.dscalar] if is_64 else [tt.fscalar]
            otypes = [tt.dscalar] if is_64 else [tt.fscalar]
            @theano.as_op(itypes, otypes)
            def kill_grad(x):
                return x

            data = np.random.normal(size=(100,))
            Normal("y", mu=kill_grad(x), sd=1, observed=data.astype(theano.config.floatX))

            steps = assign_step_methods(model, [])
        assert isinstance(steps, Slice)