Example 1
    def test_serialize_and_load(self, shape):
        def make_model(context_):
            alpha = context_.named_parameter(
                "alpha", inf.Prior(Normal, loc=0.0, scale=1.0))
            beta = context_.named_parameter(
                "beta", inf.Prior(LogNormal, loc=0.0, scale=1.0))

            return ts.models.OrnsteinUhlenbeck(alpha, beta, beta)

        with inf.make_context() as context:
            make_model(context)

            context.initialize_parameters(shape)
            as_state = context.state_dict()

        with inf.make_context() as new_context:
            model = make_model(new_context)
            new_context.load_state_dict(as_state)

            assert (context.stack_parameters() ==
                    new_context.stack_parameters()).all()

            for p1, p2 in zip(model.functional_parameters(),
                              new_context.parameters.values()):
                assert p1 is p2
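All of the snippets in this section reference names defined elsewhere in the original test module (imports, fixtures such as models, algorithm, callback and build_model, and shape constants). The sketch below shows one plausible setup; the import aliases, the module assumed to provide ts, and the concrete shape values are guesses for illustration, not something stated by the examples themselves.

# Assumed common setup for the snippets below (illustrative sketch only; the
# module path for `ts` and the concrete shape values are guesses).
import pytest
import torch
from torch.distributions import LogNormal, Normal

import pyfilter.inference as inf   # make_context, Prior, PriorBoundParameter (see Example 7)
import pyfilter.filters as filts   # APF particle filter
import pyfilter.timeseries as ts   # assumed home of ts.models.OrnsteinUhlenbeck

# shape constants referenced by several snippets
batch_shape = torch.Size([16])
shape = torch.Size([16])

The fixtures models, algorithm, callback, filter_ and build_model used in the later examples come from the original suite's pytest parametrization and are not reproduced here.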
Example 2
    def test_exchange_context(self):
        with inf.make_context() as context:
            alpha = context.named_parameter(
                "alpha", inf.Prior(Normal, loc=0.0, scale=1.0))
            beta = context.named_parameter(
                "beta", inf.Prior(LogNormal, loc=0.0, scale=1.0))

            context.initialize_parameters(batch_shape)

            with context.make_new() as sub_context:
                alpha_sub = sub_context.named_parameter(
                    "alpha", inf.Prior(Normal, loc=0.0, scale=1.0))
                beta_sub = sub_context.named_parameter(
                    "beta", inf.Prior(LogNormal, loc=0.0, scale=1.0))

                assert alpha_sub is not alpha and beta_sub is not beta

                sub_context.initialize_parameters(batch_shape)

                mask: torch.BoolTensor = (torch.empty(
                    batch_shape[0]).normal_() > 0.0).bool()

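                # copy the sub-context's parameter values into the parent context where the mask is True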
                context.exchange(sub_context, mask)

            assert (alpha[mask] == alpha_sub[mask]).all()
            assert (beta[mask] == beta_sub[mask]).all()
Example 3
    def test_verify_not_batched(self):
        with inf.make_context() as context:
            with pytest.raises(AssertionError):
                beta = context.named_parameter(
                    "beta",
                    inf.Prior(LogNormal,
                              loc=torch.zeros(1),
                              scale=torch.ones(1)))
Example 4
    def test_apply_fun(self, shape):
        with inf.make_context() as context:

            beta = context.named_parameter(
                "beta", inf.Prior(LogNormal, loc=0.0, scale=1.0))
            context.initialize_parameters(shape)

            sub_context = context.apply_fun(lambda u: u.mean())
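            # mapping each parameter through mean() should collapse it to a scalar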
            for p, v in sub_context.parameters.items():
                assert v.shape == torch.Size([])
Example 5
    def test_sample_parameters(self):
        with inf.make_context() as context:
            alpha = context.named_parameter(
                "alpha", inf.Prior(Normal, loc=0.0, scale=1.0))
            beta = context.named_parameter(
                "beta", inf.Prior(LogNormal, loc=0.0, scale=1.0))

            assert isinstance(context.get_parameter(alpha._name), inf.PriorBoundParameter)
            assert isinstance(context.get_parameter(beta._name), inf.PriorBoundParameter)

            context.initialize_parameters(batch_shape)

            assert alpha.shape == batch_shape and beta.shape == batch_shape
Example 6
    def test_assert_sampling_multiple_same(self):
        with inf.make_context() as context:
            alpha = context.named_parameter(
                "alpha", inf.Prior(Normal, loc=0.0, scale=1.0))
            beta = context.named_parameter(
                "beta", inf.Prior(LogNormal, loc=0.0, scale=1.0))

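            # registering the same name again with an identical prior is allowed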
            alpha2 = context.named_parameter(
                "alpha", inf.Prior(Normal, loc=0.0, scale=1.0))

            with pytest.raises(AssertionError):
                alpha = context.named_parameter(
                    "alpha", inf.Prior(Normal, loc=0.0, scale=2.0))
Example 7
    def test_check_inactive_context_raises(self, models, filter_):
        model, _ = models

        from pyfilter.inference import make_context

        context = make_context()

        def model_builder(context_):
            return model

        # building the filter while the context has never been entered should raise
        with pytest.raises(Exception):
            f = filter_(model_builder)
Example 8
    def test_algorithms(self, models, algorithm, callback):
        torch.manual_seed(123)

        true_model, build_model = models
        _, y = true_model.sample_states(100).get_paths()

        with inf.make_context() as context:
            filter_ = filts.APF(build_model, 200)
            alg = algorithm(filter_)

            alg.register_callback(callback)

            # TODO: Add something to test
            result = alg.fit(y)
Example 9
    def test_multidimensional_parameters(self, shape):
        with inf.make_context() as context:
            alpha = context.named_parameter(
                "alpha",
                inf.Prior(Normal,
                          loc=torch.zeros((2, 2)),
                          scale=torch.ones((2, 2)),
                          reinterpreted_batch_ndims=2))
            beta = context.named_parameter(
                "beta", inf.Prior(LogNormal, loc=0.0, scale=1.0))
            context.initialize_parameters(shape)

            stacked = context.stack_parameters()
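            # the 2x2 alpha prior flattens to 4 elements, plus the scalar beta, giving 5 columns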
            assert stacked.shape == torch.Size([shape.numel(), 5])
Example 10
    def test_algorithms_serialize(self, models, algorithm, callback):
        torch.manual_seed(123)

        true_model, build_model = models
        _, y = true_model.sample_states(100).get_paths()

        train_split = y.shape[0] // 2
        particles = 250
        with inf.make_context() as context:
            filter_ = filts.APF(build_model, particles)
            alg = algorithm(filter_)

            alg.register_callback(callback)

            result = alg.fit(y[:train_split])

            algorithm_state = result.state_dict()
            context_state = context.state_dict()

        with inf.make_context() as new_context:
            new_filter = filts.APF(build_model, particles)
            new_context.load_state_dict(context_state)

            new_alg = algorithm(new_filter)
            new_result = new_alg.initialize()
            new_result.load_state_dict(algorithm_state)

            assert (new_result.ess == result.ess).all()
            assert (new_result.w == result.w).all()

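            # continue filtering on the held-out half of the series with the restored state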
            for yt in y[train_split:]:
                new_result = new_alg.step(yt, new_result)

            assert new_result.ess.shape[0] == y.shape[0] + 1
            assert (new_result.filter_state.latest_state.x.time_index == y.shape[0]).all()
Example 11
    def test_pmcmc(self, models, kernel_and_record_states):
        true_model, build_model = models
        _, y = true_model.sample_states(50).get_paths()

        with inf.make_context() as context:
            kernel, record_states = kernel_and_record_states
            filter_ = filts.APF(lambda u: build_model(u, use_cuda=False), 150, record_states=record_states)

            # TODO: Just make sure it runs
            pmcmc = inf.batch.mcmc.PMMH(filter_, 100, initializer="mean", proposal=kernel)

            result = pmcmc.fit(y)

            # TODO: Add something to test
            print()
Example 12
    def test_make_model_and_resample(self):
        with inf.make_context() as context:
            model = build_model(context)

            context.initialize_parameters(batch_shape)

            old_dict = {k: v.clone() for k, v in context.parameters.items()}

            indices: torch.IntTensor = torch.randint(low=0,
                                                     high=batch_shape[0],
                                                     size=batch_shape[:1])
            context.resample(indices)

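            # after resampling, values should equal the old values gathered at `indices`,
            # and the model should still reference the same parameter objects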
            for p_model, (n, p) in zip(model.hidden.functional_parameters(),
                                       context.get_parameters()):
                assert (p == old_dict[n][indices]).all() and (p_model is p)
Example 13
    def test_resample_context(self):
        with inf.make_context() as context:
            alpha = context.named_parameter(
                "alpha", inf.Prior(Normal, loc=0.0, scale=1.0))
            beta = context.named_parameter(
                "beta", inf.Prior(LogNormal, loc=0.0, scale=1.0))

            context.initialize_parameters(batch_shape)

            indices: torch.IntTensor = torch.randint(low=0,
                                                     high=batch_shape[0],
                                                     size=batch_shape[:1])

            alpha_clone = alpha.clone()
            beta_clone = beta.clone()
            context.resample(indices)

            assert (alpha == alpha_clone[indices]).all()
            assert (beta == beta_clone[indices]).all()
Example 14
    def test_initialize_parameter(self, batch_shape):
        with inf.make_context() as cntxt:
            prior = inf.Prior(Normal, loc=0.0, scale=1.0)
            parameter = cntxt.named_parameter("kappa", prior)

            sts = StructuralStochasticProcess((parameter, ), None)
            assert next(sts.parameters()) is parameter
            assert cntxt.get_parameter("kappa") is parameter

            for p in sts.parameters():
                p.sample_(batch_shape)
                assert (p is parameter) and (p.shape == batch_shape)

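            # repeat the same checks on CUDA when a GPU is available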
            if not torch.cuda.is_available():
                return

            sts = sts.cuda()
            assert next(sts.parameters()) is parameter
            assert cntxt.get_parameter("kappa") is parameter

            for p in sts.parameters():
                p.sample_(batch_shape)
                assert (p is parameter) and (p.shape == batch_shape)
Example 15
    def test_assert_fails_register_inactive(self):
        context = inf.make_context()

        with pytest.raises(AssertionError):
            a = context.named_parameter("a",
                                        inf.Prior(Normal, loc=0.0, scale=1.0))