Example #1
    def test_multiple_subsampling_rates(self):
        """Test that when you give a signle integer it is applied to all levels and
        when you give a list the list is applied correctly."""
        with Model() as coarse_model_0:
            Normal("n", 0, 2.2, shape=(3, ))
        with Model() as coarse_model_1:
            Normal("n", 0, 2.1, shape=(3, ))
        with Model():
            Normal("n", 0, 2.0, shape=(3, ))

            step_1 = MLDA(coarse_models=[coarse_model_0, coarse_model_1],
                          subsampling_rates=3)
            assert len(step_1.subsampling_rates) == 2
            assert step_1.subsampling_rates[0] == step_1.subsampling_rates[1] == 3

            step_2 = MLDA(coarse_models=[coarse_model_0, coarse_model_1],
                          subsampling_rates=[3, 4])
            assert step_2.subsampling_rates[0] == 3
            assert step_2.subsampling_rates[1] == 4

            with pytest.raises(ValueError):
                step_3 = MLDA(
                    coarse_models=[coarse_model_0, coarse_model_1],
                    subsampling_rates=[3, 4, 10],
                )
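
The same rule applies outside the test: a single integer subsampling rate is broadcast to every coarse level, while a list needs exactly one entry per coarse model. A minimal usage sketch, assuming pymc3 >= 3.9 (where MLDA is available as pm.MLDA); the toy model mirrors the one above:

    import pymc3 as pm

    with pm.Model() as coarse:
        pm.Normal("n", 0.0, 2.2, shape=(3,))

    with pm.Model():
        pm.Normal("n", 0.0, 2.0, shape=(3,))
        # One integer -> the same rate at every level below the fine model.
        step = pm.MLDA(coarse_models=[coarse], subsampling_rates=5)
        trace = pm.sample(draws=200, tune=100, step=step, chains=1, cores=1)
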
Example #2
    def test_proposal_and_base_proposal_choice(self):
        """Test that proposal_dist and base_proposal_dist are set as
        expected by MLDA"""
        _, model, _ = mv_simple()
        _, model_coarse, _ = mv_simple_coarse()
        with model:
            sampler = MLDA(coarse_models=[model_coarse])
            assert isinstance(sampler.proposal_dist, RecursiveDAProposal)
            assert sampler.base_proposal_dist is None
            assert isinstance(sampler.next_step_method.proposal_dist,
                              NormalProposal)

            s = np.ones(model.ndim)
            sampler = MLDA(coarse_models=[model_coarse], base_S=s)
            assert isinstance(sampler.proposal_dist, RecursiveDAProposal)
            assert sampler.base_proposal_dist is None
            assert isinstance(sampler.next_step_method.proposal_dist,
                              NormalProposal)

            s = np.diag(s)
            sampler = MLDA(coarse_models=[model_coarse], base_S=s)
            assert isinstance(sampler.proposal_dist, RecursiveDAProposal)
            assert sampler.base_proposal_dist is None
            assert isinstance(sampler.next_step_method.proposal_dist,
                              MultivariateNormalProposal)

            s[0, 0] = -s[0, 0]
            with pytest.raises(np.linalg.LinAlgError):
                MLDA(coarse_models=[model_coarse], base_S=s)
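
For user code the takeaway is that the shape of base_S picks the proposal of the base-level sampler: a 1-D array of scales gives a NormalProposal, a 2-D covariance matrix gives a MultivariateNormalProposal, and a non-positive-definite matrix fails already at construction. A sketch under the same assumptions (pymc3 >= 3.9, illustrative toy model):

    import numpy as np
    import pymc3 as pm

    with pm.Model() as coarse:
        pm.Normal("x", 0.0, 1.2, shape=(2,))

    with pm.Model():
        pm.Normal("x", 0.0, 1.0, shape=(2,))
        step_uni = pm.MLDA(coarse_models=[coarse], base_S=np.ones(2))  # NormalProposal
        step_mv = pm.MLDA(coarse_models=[coarse], base_S=np.eye(2))    # MultivariateNormalProposal
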
Example #3
    def test_tuning_and_scaling_off(self):
        """Test that tuning is deactivated when sample()'s tune=0 and that
        MLDA's tune=False is overridden by sample()'s tune."""
        np.random.seed(12345)
        _, model = simple_2model_continuous()
        _, model_coarse = simple_2model_continuous()

        ts_0 = 0
        with model:
            trace_0 = sample(
                tune=ts_0,
                draws=100,
                step=MLDA(
                    coarse_models=[model_coarse],
                    base_tune_interval=50,
                    base_scaling=100.0,
                    tune=False,
                ),
                chains=1,
                discard_tuned_samples=False,
                random_seed=12345,
            )

        ts_1 = 100
        with model:
            trace_1 = sample(
                tune=ts_1,
                draws=20,
                step=MLDA(
                    coarse_models=[model_coarse],
                    base_tune_interval=50,
                    base_scaling=100.0,
                    tune=False,
                ),
                chains=1,
                discard_tuned_samples=False,
                random_seed=12345,
            )

        assert not trace_0.get_sampler_stats("tune", chains=0)[0]
        assert not trace_0.get_sampler_stats("tune", chains=0)[-1]
        assert (trace_0.get_sampler_stats("base_scaling", chains=0)[0][0] ==
                trace_0.get_sampler_stats("base_scaling", chains=0)[-1][0] ==
                trace_0.get_sampler_stats("base_scaling", chains=0)[0][1] ==
                trace_0.get_sampler_stats("base_scaling",
                                          chains=0)[-1][1] == 100.0)

        assert trace_1.get_sampler_stats("tune", chains=0)[0]
        assert trace_1.get_sampler_stats("tune", chains=0)[ts_1 - 1]
        assert not trace_1.get_sampler_stats("tune", chains=0)[ts_1]
        assert not trace_1.get_sampler_stats("tune", chains=0)[-1]
        assert trace_1.get_sampler_stats("base_scaling",
                                         chains=0)[0][0] == 100.0
        assert trace_1.get_sampler_stats("base_scaling",
                                         chains=0)[0][1] == 100.0
        assert trace_1.get_sampler_stats("base_scaling",
                                         chains=0)[-1][0] < 100.0
        assert trace_1.get_sampler_stats("base_scaling",
                                         chains=0)[-1][1] < 100.0
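
Both blocks of asserts read per-draw sampler statistics back from the trace: "tune" flags the draws taken while tuning was active, and "base_scaling" records the base sampler's scaling at each draw (the two-column indexing is inferred from the test itself, not from documentation). A short sketch reusing trace_1 from above:

    tune_flags = trace_1.get_sampler_stats("tune", chains=0)
    print("tuning draws:", int(tune_flags.sum()))   # 100 here: sample()'s tune wins
    base_scaling = trace_1.get_sampler_stats("base_scaling", chains=0)
    print("first/last base_scaling:", base_scaling[0], base_scaling[-1])
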
Example #4
 def test_step_continuous(self):
     start, model, (mu, C) = mv_simple()
     unc = np.diag(C)**0.5
     check = (("x", np.mean, mu, unc / 10.0), ("x", np.std, unc,
                                               unc / 10.0))
     _, model_coarse, _ = mv_simple_coarse()
     with model:
         steps = (
             Slice(),
             HamiltonianMC(scaling=C, is_cov=True, blocked=False),
             NUTS(scaling=C, is_cov=True, blocked=False),
             Metropolis(S=C, proposal_dist=MultivariateNormalProposal, blocked=True),
             Slice(blocked=True),
             HamiltonianMC(scaling=C, is_cov=True),
             NUTS(scaling=C, is_cov=True),
             CompoundStep([
                 HamiltonianMC(scaling=C, is_cov=True),
                 HamiltonianMC(scaling=C, is_cov=True, blocked=False),
             ]),
             MLDA(coarse_models=[model_coarse],
                  base_S=C,
                  base_proposal_dist=MultivariateNormalProposal),
         )
     for step in steps:
         trace = sample(
             0,
             tune=8000,
             chains=1,
             discard_tuned_samples=False,
             step=step,
             start=start,
             model=model,
             random_seed=1,
         )
         self.check_stat(check, trace, step.__class__.__name__)
Example #5
 def test_competence(self, variable, has_grad, outcome):
     """Test if competence function returns expected
     results for different models"""
     with Model() as pmodel:
         Normal("n", 0, 2, shape=(3, ))
         Binomial("b", n=2, p=0.3)
     assert MLDA.competence(pmodel[variable], has_grad=has_grad) == outcome
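
The variable, has_grad and outcome arguments come from a pytest parametrization that is not shown in this snippet. A direct, non-parametrized sketch of the same call; the return value is a member of pymc3's Competence enum, which sample() consults when auto-assigning step methods:

    import pymc3 as pm

    with pm.Model() as pmodel:
        pm.Normal("n", 0, 2, shape=(3,))
        pm.Binomial("b", n=2, p=0.3)

    for name in ("n", "b"):
        # Prints e.g. Competence.COMPATIBLE or Competence.INCOMPATIBLE,
        # depending on whether MLDA can handle the variable.
        print(name, pm.MLDA.competence(pmodel[name], has_grad=False))
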
Example #6
    def test_tuning_and_scaling_on(self):
        """Test that tune and base_scaling change as expected when
        tuning is on."""
        np.random.seed(1234)
        ts = 100
        _, model = simple_2model_continuous()
        _, model_coarse = simple_2model_continuous()
        with model:
            trace = sample(
                tune=ts,
                draws=20,
                step=MLDA(
                    coarse_models=[model_coarse],
                    base_tune_interval=50,
                    base_scaling=100.0,
                ),
                chains=1,
                discard_tuned_samples=False,
                random_seed=1234,
            )

        assert trace.get_sampler_stats("tune", chains=0)[0]
        assert trace.get_sampler_stats("tune", chains=0)[ts - 1]
        assert not trace.get_sampler_stats("tune", chains=0)[ts]
        assert not trace.get_sampler_stats("tune", chains=0)[-1]
        assert trace.get_sampler_stats("base_scaling", chains=0)[0][0] == 100.0
        assert trace.get_sampler_stats("base_scaling", chains=0)[0][1] == 100.0
        assert trace.get_sampler_stats("base_scaling", chains=0)[-1][0] < 100.0
        assert trace.get_sampler_stats("base_scaling", chains=0)[-1][1] < 100.0
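
With tuning on (the default), base_scaling starts at the value passed to MLDA and the base-level sampler adapts it, roughly every base_tune_interval iterations, which is what the last four asserts verify. A compact way to look at the adaptation on the trace above:

    scaling = trace.get_sampler_stats("base_scaling", chains=0)
    print("initial base_scaling:", scaling[0])    # [100.0, 100.0] at the first draw
    print("final base_scaling:  ", scaling[-1])   # adapted, below 100.0 in this run
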
Example #7
    def test_acceptance_rate_against_coarseness(self):
        """Test that the acceptance rate increases
        when the coarse model is closer to
        the fine model."""
        with Model() as coarse_model_0:
            Normal("x", 5.0, 1.0)

        with Model() as coarse_model_1:
            Normal("x", 6.0, 2.0)

        with Model() as coarse_model_2:
            Normal("x", 20.0, 5.0)

        possible_coarse_models = [
            coarse_model_0, coarse_model_1, coarse_model_2
        ]
        acc = []

        with Model():
            Normal("x", 5.0, 1.0)
            for coarse_model in possible_coarse_models:
                step = MLDA(coarse_models=[coarse_model],
                            subsampling_rates=3,
                            tune=True)
                trace = sample(chains=1, draws=500, tune=100, step=step)
                acc.append(trace.get_sampler_stats("accepted").mean())
            assert acc[0] > acc[1] > acc[2], (
                "Acceptance rate is not strictly increasing as the coarse "
                "model gets closer to the fine model. "
                "Acceptance rates were: {}".format(acc)
            )
Example #8
    def test_exceptions_coarse_models(self):
        """Test that MLDA generates the expected exceptions when no coarse_models arg
        is passed, an empty list is passed or when coarse_models is not a list"""
        with pytest.raises(TypeError):
            _, model, _ = mv_simple()
            with model:
                MLDA()

        with pytest.raises(ValueError):
            _, model, _ = mv_simple()
            with model:
                MLDA(coarse_models=[])

        with pytest.raises(ValueError):
            _, model, _ = mv_simple()
            with model:
                MLDA(coarse_models=(model, model))
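
In short, coarse_models has to be a non-empty Python list and MLDA has to be built inside a model context; anything else raises as above. The accepted form, reusing the mv_simple fixtures:

    _, model, _ = mv_simple()
    _, model_coarse, _ = mv_simple_coarse()
    with model:
        step = MLDA(coarse_models=[model_coarse])   # a non-empty list
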
Example #9
 def test_step_methods_in_each_level(self):
     """Test that MLDA creates the correct hierarchy of step methods when no
     coarse models are passed and when two coarse models are passed."""
     _, model, _ = mv_simple()
     _, model_coarse, _ = mv_simple_coarse()
     _, model_very_coarse, _ = mv_simple_very_coarse()
     with model:
         s = np.ones(model.ndim) + 2.0
         sampler = MLDA(coarse_models=[model_very_coarse, model_coarse],
                        base_S=s)
         assert isinstance(sampler.next_step_method, MLDA)
         assert isinstance(sampler.next_step_method.next_step_method,
                           Metropolis)
         assert np.all(
             sampler.next_step_method.next_step_method.proposal_dist.s == s)
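
The list order matters: the tests pass the coarsest model first and the model closest to the fine one last. MLDA then builds a nested MLDA step for each intermediate level and a Metropolis step at the coarsest level, which is what the isinstance checks assert. A sketch reusing the same fixtures:

    with model:
        step = MLDA(coarse_models=[model_very_coarse, model_coarse],  # coarsest first
                    base_S=np.ones(model.ndim) + 2.0)
        print(type(step.next_step_method).__name__)                   # MLDA
        print(type(step.next_step_method.next_step_method).__name__)  # Metropolis
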
Example #10
 def test_trace_length(self):
     """Check if trace length is as expected."""
     tune = 100
     draws = 50
     with Model() as coarse_model:
         Normal("n", 0, 2.2, shape=(3, ))
     with Model():
         Normal("n", 0, 2, shape=(3, ))
         step = MLDA(coarse_models=[coarse_model])
         trace = sample(tune=tune,
                        draws=draws,
                        step=step,
                        chains=1,
                        discard_tuned_samples=False)
         assert len(trace) == tune + draws
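
The length check relies on discard_tuned_samples=False keeping the tuning draws in the returned trace; with the default setting only the post-tuning draws remain. A short sketch, continuing inside the same model context with the step from above:

    kept = sample(tune=100, draws=50, step=step, chains=1,
                  discard_tuned_samples=False)
    dropped = sample(tune=100, draws=50, step=step, chains=1)
    assert len(kept) == 100 + 50   # tuning draws kept in the trace
    assert len(dropped) == 50      # tuning draws dropped (the default)
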
Example #11
    def test_float32_MLDA(self):
        data = np.random.randn(5).astype("float32")

        with Model() as coarse_model:
            x = Normal("x", testval=np.array(1.0, dtype="float32"))
            obs = Normal("obs", mu=x, sigma=1.0, observed=data + 0.5)

        with Model() as model:
            x = Normal("x", testval=np.array(1.0, dtype="float32"))
            obs = Normal("obs", mu=x, sigma=1.0, observed=data)

        assert x.dtype == "float32"
        assert obs.dtype == "float32"

        with model:
            sample(10, MLDA(coarse_models=[coarse_model]))
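
The dtype asserts only hold when theano's floatX flag is float32 (pymc3 variables default to theano.config.floatX), so this test presumably runs under a float32 configuration, roughly along these lines:

    import theano

    theano.config.floatX = "float32"   # variables created below default to float32
    # build coarse_model and model exactly as in the test, then:
    with model:
        sample(10, MLDA(coarse_models=[coarse_model]), chains=1)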