Example #1
    def jump(self, xi, t):
        n = xi.size(0)
        # spatial multivariate gaussian
        m_gauss = MultivariateNormal(self.mu_jump * torch.ones(n),
                                     self.std_jump * torch.eye(n))
        # poisson process, probability of arrival at time t
        exp_d = Exponential(self.lambd)
        # independent events, mult probabilities
        p = torch.exp(m_gauss.log_prob(xi)) * (
            1 - torch.exp(exp_d.log_prob(self.last_jump)))

        # one sample from bernoulli trial
        event = Bernoulli(p).sample([1])

        if event:
            coord_before = xi
            xi = self.jump_event(xi, t)  # flatten resulting sampled location
            coord_after = xi
            # saving jump coordinate info
            self.log_jump(t, coord_before, coord_after)

            self.last_jump = 0
        # if no jump, increase counter for bern trial
        else:
            self.last_jump += 1

        return xi
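
Note: the snippet above uses `1 - exp(log_prob(last_jump))` as the arrival probability. If the intended quantity is the probability that a Poisson arrival has occurred by time `last_jump`, the exponential CDF gives it directly. A minimal standalone sketch, not part of the original class; the values of `lambd` and `t` are made up for illustration:

    import torch
    from torch.distributions import Exponential

    # P(arrival by time t) for exponential inter-arrival times is the CDF 1 - exp(-lambd * t).
    lambd, t = 0.5, 2.0
    exp_d = Exponential(lambd)
    p_arrival = exp_d.cdf(torch.tensor(t))
    assert torch.allclose(p_arrival, 1 - torch.exp(torch.tensor(-lambd * t)))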
Example #2
    def test_Algorithms(self):
        priors = Exponential(2.), Exponential(2.)
        # ===== Test for multiple models ===== #
        hidden1d = AffineModel((f0, g0), (f, g), priors, (self.linear.noise0, self.linear.noise))
        oned = LinearGaussianObservations(hidden1d, 1., Exponential(1.))

        hidden2d = AffineModel((f0mvn, g0mvn), (fmvn, gmvn), priors, (self.mvn.noise0, self.mvn.noise))
        twod = LinearGaussianObservations(hidden2d, self.a, Exponential(1.))

        # ====== Run inference ===== #
        for trumod, model in [(self.model, oned), (self.mvnmodel, twod)]:
            x, y = trumod.sample(550)

            algs = [
                (NESS, {'particles': 1000, 'filter_': SISR(model.copy(), 200)}),
                (SMC2, {'particles': 1000, 'filter_': SISR(model.copy(), 200)}),
                (NESSMC2, {'particles': 1000, 'filter_': SISR(model.copy(), 200)}),
                (IteratedFilteringV2, {'particles': 1000, 'filter_': SISR(model.copy(), 1000)})
            ]

            for alg, props in algs:
                alg = alg(**props).initialize()

                alg = alg.fit(y)

                parameter = alg.filter.ssm.hidden.theta[-1]

                kde = parameter.get_kde(transformed=False)

                tru_val = trumod.hidden.theta[-1]
                densval = kde.logpdf(tru_val.numpy().reshape(-1, 1))
                priorval = parameter.dist.log_prob(tru_val)

                assert bool(densval > priorval.numpy())
Example #3
 def test_exponential_shape_tensor_param(self):
     expon = Exponential(torch.Tensor([1, 1]))
     self.assertEqual(expon._batch_shape, torch.Size((2,)))
     self.assertEqual(expon._event_shape, torch.Size(()))
     self.assertEqual(expon.sample().size(), torch.Size((2,)))
     self.assertEqual(expon.sample((3, 2)).size(), torch.Size((3, 2, 2)))
     self.assertEqual(expon.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
     self.assertRaises(ValueError, expon.log_prob, self.tensor_sample_2)
Example #4
 def test_exponential_shape_scalar_param(self):
     expon = Exponential(1.)
     self.assertEqual(expon._batch_shape, torch.Size())
     self.assertEqual(expon._event_shape, torch.Size())
     self.assertEqual(expon.sample().size(), torch.Size((1,)))
     self.assertEqual(expon.sample((3, 2)).size(), torch.Size((3, 2)))
     self.assertRaises(ValueError, expon.log_prob, self.scalar_sample)
     self.assertEqual(expon.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
     self.assertEqual(expon.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))
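
Note: both shape tests above follow from the rule that `Exponential` is univariate, so `event_shape` is always empty and `sample(shape)` returns a tensor of shape `sample_shape + batch_shape`. A minimal sketch of that rule on current PyTorch:

    import torch
    from torch.distributions import Exponential

    d = Exponential(torch.tensor([1.0, 2.0]))            # tensor rate -> batch_shape (2,)
    assert d.batch_shape == torch.Size([2]) and d.event_shape == torch.Size([])
    assert d.sample((3,)).shape == torch.Size([3, 2])     # sample_shape + batch_shape
    assert d.log_prob(torch.rand(5, 2) + 0.1).shape == torch.Size([5, 2])  # log_prob broadcasts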
Example #5
    def test_ParameterInDistribution(self):
        shape = 10, 100

        a = 1e-2 * torch.ones((shape[0], 1))
        dt = 1e-2
        dist = Normal(loc=0., scale=Parameter(Exponential(10.)))

        init = Normal(a, 1.)
        sde = AffineEulerMaruyama((f_sde, g_sde), (a, 0.15),
                                  init,
                                  dist,
                                  dt=dt,
                                  num_steps=10)

        sde.sample_params(shape)

        # ===== Initialize ===== #
        x = sde.i_sample(shape)

        # ===== Propagate ===== #
        num = 1000
        samps = [x]
        for t in range(num):
            samps.append(sde.propagate(samps[-1]))

        samps = torch.stack(samps)
        self.assertEqual(samps.size(), torch.Size([num + 1, *shape]))

        # ===== Sample path ===== #
        path = sde.sample_path(num + 1, shape)
        self.assertEqual(samps.shape, path.shape)
Example #6
    def test_Inference(self):
        # ===== Distributions ===== #
        dist = Normal(0., 1.)
        mvn = Independent(Normal(torch.zeros(2), torch.ones(2)), 1)

        # ===== Define model ===== #
        linear = AffineProcess((f, g), (1., 0.25), dist, dist)
        model = LinearGaussianObservations(linear, scale=0.1)

        mv_linear = AffineProcess((fmvn, gmvn), (0.5, 0.25), mvn, mvn)
        mvnmodel = LinearGaussianObservations(mv_linear, torch.eye(2), scale=0.1)

        # ===== Test for multiple models ===== #
        priors = Exponential(1.), LogNormal(0., 1.)

        hidden1d = AffineProcess((f, g), priors, dist, dist)
        oned = LinearGaussianObservations(hidden1d, 1., scale=0.1)

        hidden2d = AffineProcess((fmvn, gmvn), priors, mvn, mvn)
        twod = LinearGaussianObservations(hidden2d, torch.eye(2), scale=0.1 * torch.ones(2))

        particles = 1000
        # ====== Run inference ===== #
        for trumod, model in [(model, oned), (mvnmodel, twod)]:
            x, y = trumod.sample_path(1000)

            algs = [
                (NESS, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (NESS, {'particles': particles, 'filter_': UKF(model.copy())}),
                (SMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (SMC2FW, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
                (NESSMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)})
            ]

            for alg, props in algs:
                alg = alg(**props).initialize()

                alg = alg.fit(y)

                w = normalize(alg._w_rec if hasattr(alg, '_w_rec') else torch.ones(particles))

                tru_params = trumod.hidden.theta._cont + trumod.observable.theta._cont
                inf_params = alg.filter.ssm.hidden.theta._cont + alg.filter.ssm.observable.theta._cont

                for trup, p in zip(tru_params, inf_params):
                    if not p.trainable:
                        continue

                    kde = p.get_kde(weights=w)

                    transed = p.bijection.inv(trup)
                    densval = kde.logpdf(transed.numpy().reshape(-1, 1))
                    priorval = p.distr.log_prob(trup)

                    assert (densval > priorval.numpy()).all()
Example #7
    def _make_ma_mdp(self):
        joint_action_shape = self.joint_action_shape
        n_states = self.n_states
        n_agents = len(joint_action_shape)
        rand = self.rand

        # Reward perturbation
        perturbation = mu.unsqueeze(
            self.reward_perturbation, -1,
            n_states + 3 - self.reward_perturbation.dim())
        # Generate transition probability tensor
        trans_prob = th.rand(n_states,
                             *joint_action_shape,
                             n_states,
                             generator=rand)
        # Acyclic (episodic) MDP
        if self.acyclic:
            states_idx, next_states_idx = th.tril_indices(n_states, n_states)
            trans_prob[states_idx, ..., next_states_idx] = 0
        # Normalize transition probability matrix
        trans_prob /= trans_prob.sum(dim=-1, keepdim=True)
        trans_prob[th.isnan(trans_prob)] = 0

        # Generate random reward (following method ensures enough variance in rewards)
        # 1) Generate rewards "core" for state, joint actions and agents
        rewards = th.randn(n_states,
                           *joint_action_shape,
                           1,
                           n_agents,
                           generator=rand)
        # 2) Multiply "core" by scales to generate different rewards for next state
        scales_dist = Exponential(th.tensor(1.))
        with mu.use_rand(rand):
            rewards *= scales_dist.sample(
                (n_states, *joint_action_shape, n_states, n_agents))
        # 3) Correlate rewards
        rewards = rewards @ self.reward_correlation

        ## Transition probability
        self._trans_prob = trans_prob
        ## Rewards for state-joint actions
        self._rewards = rewards
Example #8
 def forward(self, theta, n_samp=1):
     n_exp = theta.shape[0]
     n_samp = torch.Size([n_samp, 1])
     unit = torch.ones(n_exp).to(self.device)
     with torch.autograd.no_grad():
         d0 = Normal(theta[:, 0], unit)
         z0 = d0.sample(n_samp).permute(2, 0, 1)
         d1 = Normal(3 * unit, torch.exp(theta[:, 1] / 3))
         z1 = d1.sample(n_samp).permute(2, 0, 1)
         d2_1 = Normal(-2 * unit, unit)
         d2_2 = Normal(2 * unit, .5 * unit)
         d2_b = Bernoulli(.5 * unit)
         z2_b = d2_b.sample(n_samp).float()
         # Gaussian Mixture
         z2 = ((z2_b * d2_1.sample(n_samp) +
                (1 - z2_b) * d2_2.sample(n_samp)).permute(2, 0, 1))
         d3 = Uniform(-5 * unit, theta[:, 2])
         z3 = d3.sample(n_samp).permute(2, 0, 1)
         d4 = Exponential(.5 * unit)
         z4 = d4.sample(n_samp).permute(2, 0, 1)
         z = torch.cat((z0, z1, z2, z3, z4), 2)
         X = torch.matmul(self.R,
                          z.view(-1, 5).unsqueeze(2)).view(n_exp, -1, 5)
     return X
Example #9
    def test_Parameter(self):
        # ===== Start stuff ===== #
        param = Parameter(Normal(0., 1.))
        param.sample_(1000)

        assert param.shape == torch.Size([1000])

        # ===== Construct view ===== #
        view = param.view(1000, 1)

        # ===== Change values ===== #
        param.values = torch.empty_like(param).normal_()

        assert (view[:, 0] == param).all()

        # ===== Have in tuple ===== #
        vals = (param.view(1000, 1, 1), )

        param.values = torch.empty_like(param).normal_()

        assert (vals[0][:, 0, 0] == param).all()

        # ===== Set t_values ===== #
        view = param.view(1000, 1)

        param.t_values = torch.empty_like(param).normal_()

        assert (view[:, 0] == param.t_values).all()

        # ===== Check we cannot set different shape ===== #
        with self.assertRaises(ValueError):
            param.values = torch.empty(1).normal_()

        # ===== Check that we cannot set out of bounds values for parameter ===== #
        positive = Parameter(Exponential(1.))
        positive.sample_(1)

        with self.assertRaises(ValueError):
            positive.values = -torch.empty_like(positive).normal_().abs()

        # ===== Check that we can set transformed values ===== #
        values = torch.empty_like(positive).normal_()
        positive.t_values = values

        assert (positive == positive.bijection(values)).all()
Example #10
            if (t > 10
                    and abs(self.loss_hist[-1] / self.loss_hist[-2] - 1) < tol
                    and verbose):
                print('Convergence detected!')
                break


if __name__ == '__main__':
    torch.manual_seed(0)
    theta_true = 5.3
    # NOTE: see n=3, n=10, n=30, n=1000
    # For small n, the MAP estimate with jacobian is far from truth.  As n
    # increases, the difference is small, because the posterior gets more
    # peaked.
    n = 5
    y = Exponential(theta_true).sample((n, ))

    model = Model(y)
    model.fit(lr=.01)

    model_with_jacobian = Model(y, use_jacobian=True)
    model_with_jacobian.fit(lr=.01)

    true_map_est = ((n + model.prior_shape - 1) /
                    (model.prior_rate + model.y_sum))

    map_est = model.log_theta.exp()
    map_est_with_jacobian = model_with_jacobian.log_theta.exp()

    print('True MAP est: {}'.format(true_map_est))
    print('MAP est (w/o jacobian): {}'.format(map_est))
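
Note: `true_map_est` comes from Gamma-Exponential conjugacy: with a Gamma(alpha, beta) prior on the rate and n exponential observations summing to S, the posterior is Gamma(alpha + n, beta + S), whose mode is (alpha + n - 1)/(beta + S). A small sketch that checks the closed form numerically; `prior_shape`, `prior_rate`, and the data here are made-up stand-ins for the corresponding fields of `Model`:

    import torch
    from torch.distributions import Exponential, Gamma

    torch.manual_seed(0)
    theta_true, n = 5.3, 5
    y = Exponential(theta_true).sample((n,))
    prior_shape, prior_rate = 1.0, 1.0            # assumed Gamma(alpha, beta) prior on the rate

    # Posterior over the rate is Gamma(alpha + n, beta + sum(y)); its mode is the MAP estimate.
    post = Gamma(prior_shape + n, prior_rate + y.sum())
    map_closed_form = (prior_shape + n - 1) / (prior_rate + y.sum())

    # The closed-form mode should maximize the posterior log-density on a fine grid.
    grid = torch.linspace(0.1, 20.0, 20000)
    assert torch.isclose(grid[post.log_prob(grid).argmax()], map_closed_form, atol=1e-2)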
Example #11
 def test_exponential_sample(self):
     self._set_rng_seed(1)
     for rate in [1e-5, 1.0, 10.]:
         self._check_sampler_sampler(Exponential(rate),
                                     scipy.stats.expon(scale=1. / rate),
                                     'Exponential(rate={})'.format(rate))
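
Note: the scipy reference encodes the parameterization difference: torch's `Exponential` takes a rate, while `scipy.stats.expon` takes a scale equal to 1/rate. A small sketch comparing the two by their sample means; the sample size and seeds are arbitrary:

    import torch
    import scipy.stats
    from torch.distributions import Exponential

    rate = 10.0
    torch.manual_seed(0)
    torch_mean = Exponential(rate).sample((100000,)).mean().item()
    scipy_mean = scipy.stats.expon(scale=1.0 / rate).rvs(100000, random_state=0).mean()
    # Both should be close to the true mean 1 / rate.
    assert abs(torch_mean - 1.0 / rate) < 1e-2 and abs(scipy_mean - 1.0 / rate) < 1e-2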
Example #12
    def test_exponential(self):
        rate = Variable(torch.randn(5, 5).abs(), requires_grad=True)
        rate_1d = Variable(torch.randn(1).abs(), requires_grad=True)
        self.assertEqual(Exponential(rate).sample().size(), (5, 5))
        self.assertEqual(Exponential(rate).sample((7, )).size(), (7, 5, 5))
        self.assertEqual(Exponential(rate_1d).sample((1, )).size(), (1, 1))
        self.assertEqual(Exponential(rate_1d).sample().size(), (1, ))
        self.assertEqual(Exponential(0.2).sample((1, )).size(), (1, ))
        self.assertEqual(Exponential(50.0).sample((1, )).size(), (1, ))

        self._gradcheck_log_prob(Exponential, (rate, ))
        state = torch.get_rng_state()
        eps = rate.new(rate.size()).exponential_()
        torch.set_rng_state(state)
        z = Exponential(rate).rsample()
        z.backward(torch.ones_like(z))
        self.assertEqual(rate.grad, -eps / rate**2)
        rate.grad.zero_()
        self.assertEqual(z.size(), (5, 5))

        def ref_log_prob(idx, x, log_prob):
            m = rate.data.view(-1)[idx]
            expected = math.log(m) - m * x
            self.assertAlmostEqual(log_prob, expected, places=3)

        self._check_log_prob(Exponential(rate), ref_log_prob)
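
Note: the gradient assertion above relies on `rsample` drawing z = eps / rate with a rate-free standard-exponential eps, so dz/drate = -eps / rate**2 = -z / rate. Assuming that reparameterization (which the RNG-state trick in the test already depends on), the same identity can be checked without touching the RNG state:

    import torch
    from torch.distributions import Exponential

    rate = torch.tensor([2.0, 5.0], requires_grad=True)
    z = Exponential(rate).rsample()
    z.backward(torch.ones_like(z))
    # For z = eps / rate, the gradient w.r.t. the rate is -z / rate elementwise.
    assert torch.allclose(rate.grad, -z.detach() / rate.detach())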
Example #13
    def likelihood(self, x, z_obj):
        """Evaluate likelihood of x under model.

        Args:
            x (torch.Tensor), (n, T, c, w, h) The given sequence of observations.
            z_obj (torch.Tensor), (nTO, 4): Samples from z distribution.

        Returns:
            log_p_xz (torch.Tensor), (nT): Image likelihood.

        """
        # reshape x to merge batch and sequences to pseudo batch since spn
        # will work on image basis shape (n4, c, w, h)
        x_img = x.flatten(end_dim=1)

        # 1. Background Likelihood
        # reshape to (n4, o, 4) and extract marginalisation information
        z_img = z_obj.view(-1, self.c.num_obj, 4)
        marginalise_patch, marginalise_bg, overlap_ratios = self.masks_from_z(
            z_img)
        # flatten, st. shape is (n4, cwh) for both
        img_flat = x_img.flatten(start_dim=1)
        marg_flat = marginalise_bg.flatten(start_dim=1)
        # get likelihood of background under bg_spn, output from (n4, 1) to (n4)
        bg_loglik = self.bg_spn.forward(img_flat, marg_flat)[:, 0]

        # 2. Patch Likelihoods
        # extract patches (n4o, c, w, h) from transformer
        patches = self.patches_from_z(x_img, z_obj)
        # flatten into (n4o, c w_out h_out)
        patches_flat = patches.flatten(start_dim=1)
        marginalise_flat = marginalise_patch.flatten(start_dim=1)
        # (n4o)
        patches_loglik = self.obj_spn.forward(patches_flat,
                                              marginalise_flat)[:, 0]
        # scale patch_likelihoods by size of patch to obtain
        # well calibrated likelihoods
        patches_loglik = patches_loglik * z_obj[:, 0] * z_obj[:, 1]
        # shape n4o to n4,o
        patches_loglik = patches_loglik.view(-1, self.c.num_obj)

        # 3. Add Exponential overlap_penalty
        overlap_prior = Exponential(self.c.overlap_beta)
        overlap_log_liks = overlap_prior.log_prob(overlap_ratios)

        # 4. Assemble final img likelihood E_q(z|x)[log p(x, z)]
        # expectation is approximated by a single sample
        patches_loglik = patches_loglik.sum(1)
        overlap_log_liks = overlap_log_liks.sum(1)
        scores = [bg_loglik, patches_loglik, overlap_log_liks]
        scores = torch.stack(scores, -1)
        # shape (n4)
        log_p_xz = scores.sum(-1)

        if ((self.step_counter % self.c.print_every == 0)
                or (self.step_counter % self.c.plot_every == 0)):
            if self.c.debug:
                self.prop_dict['bg'] = bg_loglik.mean().detach()
                self.prop_dict['patch'] = patches_loglik.mean().detach()
                self.prop_dict['overlap'] = overlap_log_liks.mean().detach()
            if self.c.debug and self.c.debug_extend_plots:
                self.prop_dict['overlap_ratios'] = overlap_ratios.detach()
                self.prop_dict['patches'] = patches.detach()
                self.prop_dict['marginalise_flat'] = marginalise_flat.detach()
                self.prop_dict['patches_loglik'] = patches_loglik.detach()
                self.prop_dict['marginalise_bg'] = marginalise_bg.detach()
                self.prop_dict['bg_loglik'] = bg_loglik.detach()

        return log_p_xz, self.prop_dict
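
Note: step 3 above uses `Exponential(overlap_beta).log_prob(overlap_ratios)` as a soft overlap penalty: since log_prob(x) = log(beta) - beta * x, larger overlap ratios lower the assembled log-likelihood linearly. A tiny sketch of just that term; `overlap_beta` and the ratios are made-up values:

    import torch
    from torch.distributions import Exponential

    overlap_beta = 2.0
    overlap_ratios = torch.tensor([[0.05, 0.1, 0.5]])    # per-object overlap ratios
    penalty = Exponential(overlap_beta).log_prob(overlap_ratios)
    expected = torch.log(torch.tensor(overlap_beta)) - overlap_beta * overlap_ratios
    assert torch.allclose(penalty, expected)             # log(beta) - beta * x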
Example #14
    assert isinstance(transform_to_inspect, target_transform)

    samples = prior.sample((2, ))
    transformed_samples = transform(samples)
    assert torch.allclose(samples, transform.inv(transformed_samples))


@pytest.mark.parametrize(
    "prior, enable_transform",
    (
        (BoxUniform(zeros(5), ones(5)), True),
        (BoxUniform(zeros(1), ones(1)), True),
        (BoxUniform(zeros(5), ones(5)), False),
        (MultivariateNormal(zeros(5), eye(5)), True),
        (Exponential(rate=ones(1)), True),
        (LogNormal(zeros(1), ones(1)), True),
        (
            MultipleIndependent(
                [Exponential(rate=ones(1)),
                 BoxUniform(zeros(5), ones(5))]),
            True,
        ),
    ),
)
def test_mcmc_transform(prior, enable_transform):
    """
    Test whether the transform for MCMC returns the log_abs_det in the correct shape.
    """

    num_samples = 1000
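
Note: for a constrained prior such as `Exponential`, MCMC is typically run in an unconstrained space, and torch's `biject_to` registry supplies the bijection from the real line onto the prior's support; its inverse and log-abs-det Jacobian are what the test inspects. A minimal sketch using only the public registry, assuming it covers the prior's support constraint (as it does for the standard ones):

    import torch
    from torch.distributions import Exponential, biject_to

    prior = Exponential(rate=torch.ones(1))
    transform = biject_to(prior.support)        # real line -> positive support

    samples = prior.sample((2,))
    unconstrained = transform.inv(samples)
    assert torch.allclose(samples, transform(unconstrained))
    ladj = transform.log_abs_det_jacobian(unconstrained, samples)  # one value per element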
Example #15
 def sample(self, size):
     m = Exponential(torch.tensor([1.0]))
     return m.sample(size)
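
Note: because the rate is passed as a 1-element tensor, the distribution has batch_shape (1,), so `m.sample(size)` returns a tensor of shape `size + (1,)` rather than `size`. A one-line check:

    import torch
    from torch.distributions import Exponential

    m = Exponential(torch.tensor([1.0]))
    assert m.sample((3, 2)).shape == torch.Size([3, 2, 1])   # sample_shape + batch_shape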
Example #16
 def test_exponential_sample(self):
     set_rng_seed(1)  # see Note [Randomized statistical tests]
     for rate in [1e-5, 1.0, 10.]:
         self._check_sampler_sampler(Exponential(rate),
                                     scipy.stats.expon(scale=1. / rate),
                                     'Exponential(rate={})'.format(rate))