Example #1
    def _draw_likelihood_samples(self,
                                 function_dist,
                                 *args,
                                 sample_shape=None,
                                 **kwargs):
        if self.training:
            num_event_dims = len(function_dist.event_shape)
            function_dist = base_distributions.Normal(
                function_dist.mean, function_dist.variance.sqrt())
            function_dist = base_distributions.Independent(
                function_dist, num_event_dims - 1)

        plate_name = self.name_prefix + ".num_particles_vectorized"
        num_samples = settings.num_likelihood_samples.value()
        max_plate_nesting = max(self.max_plate_nesting,
                                len(function_dist.batch_shape))
        with pyro.plate(plate_name,
                        size=num_samples,
                        dim=(-max_plate_nesting - 1)):
            if sample_shape is None:
                function_samples = pyro.sample(self.name_prefix,
                                               function_dist.mask(False))
                # Deal with the fact that we're not assuming conditional
                # independence over data points here
                function_samples = function_samples.squeeze(
                    -len(function_dist.event_shape) - 1)
            else:
                sample_shape = sample_shape[:-len(function_dist.batch_shape)]
                function_samples = function_dist(sample_shape)

            if not self.training:
                function_samples = function_samples.squeeze(
                    -len(function_dist.event_shape) - 1)
            return self.forward(function_samples, *args, **kwargs)
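For context, the pyro.plate above vectorizes likelihood sampling by inserting a particle dimension to the left of the distribution's batch dimensions. A minimal stand-alone sketch of that mechanism with plain Pyro and a toy Normal (the "particles" and "f" site names here are illustrative, not from the snippet):

    import torch
    import pyro
    import pyro.distributions as dist

    def draw_vectorized_samples(num_samples=8, num_points=3):
        # Toy latent function values: batch_shape=[3], event_shape=[]
        fn = dist.Normal(torch.zeros(num_points), torch.ones(num_points))
        # The plate adds a sample dimension at dim=-2, to the left of the
        # batch dimension, just like ".num_particles_vectorized" above
        with pyro.plate("particles", size=num_samples, dim=-2):
            return pyro.sample("f", fn)

    samples = draw_vectorized_samples()
    print(samples.shape)  # torch.Size([8, 3])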
Example #2
    def forward(self, function_samples: Tensor, *params: Any,
                **kwargs: Any) -> base_distributions.Independent:
        noise = self._shaped_noise_covar(function_samples.shape, *params,
                                         **kwargs).diag()
        noise = noise.view(*noise.shape[:-1], *function_samples.shape[-2:])
        return base_distributions.Independent(
            base_distributions.Normal(function_samples, noise.sqrt()), 1)

    def log_marginal(self, observations: Tensor,
                     function_dist: MultivariateNormal, *params: Any,
                     **kwargs: Any) -> Tensor:
        marginal = self.marginal(function_dist, *params, **kwargs)
        # We're making everything conditionally independent
        indep_dist = base_distributions.Normal(
            marginal.mean,
            marginal.variance.clamp_min(1e-8).sqrt())
        res = indep_dist.log_prob(observations)

        # Do the appropriate summation for multitask Gaussian likelihoods
        num_event_dim = len(function_dist.event_shape)
        if num_event_dim > 1:
            res = res.sum(list(range(-1, -num_event_dim, -1)))
        return res
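The summation at the end of log_marginal collapses the extra event dimensions of a multitask likelihood into one log-probability per data point. A quick self-contained check that this matches base_distributions.Independent (the shapes are made up for illustration):

    import torch
    from torch import distributions as base_distributions

    mean = torch.randn(5, 2)              # 5 data points, 2 tasks
    std = torch.rand(5, 2) + 0.1
    obs = torch.randn(5, 2)

    per_dim = base_distributions.Normal(mean, std).log_prob(obs)  # [5, 2]
    summed = per_dim.sum(-1)                                      # [5]

    # Same result via Independent with one reinterpreted batch dim
    indep = base_distributions.Independent(
        base_distributions.Normal(mean, std), 1)
    assert torch.allclose(summed, indep.log_prob(obs))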
Example #4
    def _draw_likelihood_samples(self,
                                 function_dist,
                                 r_dist,
                                 seg_bag_idx,
                                 bag_labels_gt,
                                 video_names,
                                 *args,
                                 sample_shape=None,
                                 **kwargs):

        if sample_shape is None:
            sample_shape = torch.Size(
                [settings.num_likelihood_samples.value()] + [1] *
                (self.max_plate_nesting - len(function_dist.batch_shape) - 1))
        else:
            sample_shape = sample_shape[:-len(function_dist.batch_shape) - 1]
        if self.training:
            num_event_dims = len(function_dist.event_shape)

            function_dist = base_distributions.Normal(
                function_dist.mean, function_dist.variance.sqrt())

            function_dist = base_distributions.Independent(
                function_dist, num_event_dims - 1)
        n_videos, n_segments = r_dist.shape
        # One-hot indicators of which segment each sample selects per video
        z_samples = torch.zeros(settings.num_likelihood_samples.value(),
                                n_videos, n_segments)
        if torch.cuda.is_available():
            z_samples = z_samples.cuda()
        for i, video in enumerate(video_names):
            z_samples[:, i, :] = torch.nn.functional.one_hot(
                torch.multinomial(r_dist[i],
                                  settings.num_likelihood_samples.value(),
                                  replacement=True),
                num_classes=n_segments)

        function_samples = function_dist.rsample(sample_shape)

        return self.forward(function_samples, z_samples, seg_bag_idx,
                            bag_labels_gt, video_names, *args, **kwargs)
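The loop above draws num_likelihood_samples segment indices per video from the categorical weights in r_dist and encodes each draw as a one-hot vector. Stripped of the surrounding class, the trick is just this (the weights are made-up numbers):

    import torch

    num_samples = 4
    r = torch.tensor([0.1, 0.6, 0.3])   # per-segment weights for one video
    idx = torch.multinomial(r, num_samples, replacement=True)    # shape [4]
    z = torch.nn.functional.one_hot(idx, num_classes=r.numel())  # shape [4, 3]
    print(z)  # each row has a single 1 at the sampled segment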
Example #5
    def marginal(self, function_dist, **kwargs):
        mean = function_dist.mean
        var = function_dist.variance
        link = mean.div(torch.sqrt(1 + var))
        output_probs = base_distributions.Normal(0, 1).cdf(link)
        return base_distributions.Bernoulli(probs=output_probs)
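The link = mean / sqrt(1 + var) line is the closed-form Gaussian integral of the probit likelihood: the marginal p(y = 1) equals Phi(mu / sqrt(1 + sigma^2)), where Phi is the standard normal CDF. A Monte-Carlo sanity check of that identity (mu and var are arbitrary test values):

    import torch
    from torch import distributions as D

    mu, var = torch.tensor(0.7), torch.tensor(2.0)
    closed_form = D.Normal(0.0, 1.0).cdf(mu / torch.sqrt(1 + var))

    f = D.Normal(mu, var.sqrt()).sample((200_000,))
    monte_carlo = D.Normal(0.0, 1.0).cdf(f).mean()
    # closed_form and monte_carlo agree to roughly three decimal places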
Example #6
    def forward(self, function_samples, **kwargs):
        output_probs = base_distributions.Normal(0, 1).cdf(function_samples)
        return base_distributions.Bernoulli(probs=output_probs)

    def forward(self, function_samples: Tensor, *params: Any,
                **kwargs: Any) -> base_distributions.Normal:
        noise = self._shaped_noise_covar(function_samples.shape, *params,
                                         **kwargs).diag()
        return base_distributions.Normal(function_samples, noise.sqrt())
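Both forward methods map function samples to a conditional distribution over observations: the first (from a Bernoulli likelihood) squashes the samples through the standard normal CDF, the second (from a Gaussian likelihood) attaches learned observation noise. A minimal stand-alone run of the Bernoulli path:

    import torch
    from torch import distributions as base_distributions

    f = torch.randn(10)                                 # function samples
    probs = base_distributions.Normal(0.0, 1.0).cdf(f)  # probit squash
    y = base_distributions.Bernoulli(probs=probs).sample()  # 0./1. labels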