Example #1
    def loss(self, model, guide, *args, **kwargs):
        """
        :returns: an estimate of the ELBO
        :rtype: float

        Evaluates the ELBO with an estimator that uses ``num_particles`` samples/particles.
        """
        elbo = 0.0
        for weight, model_trace, guide_trace, log_r in self._get_traces(
                model, guide, *args, **kwargs):
            elbo_particle = weight * 0

            log_pdf = "batch_log_pdf" if (
                self.enum_discrete and weight.size(0) > 1) else "log_pdf"
            for name in model_trace.nodes.keys():
                if model_trace.nodes[name]["type"] == "sample":
                    if model_trace.nodes[name]["is_observed"]:
                        elbo_particle += model_trace.nodes[name][log_pdf]
                    else:
                        elbo_particle += model_trace.nodes[name][log_pdf]
                        elbo_particle -= guide_trace.nodes[name][log_pdf]

            # drop terms of weight zero to avoid nans
            if isinstance(weight, numbers.Number):
                if weight == 0.0:
                    elbo_particle = torch_zeros_like(elbo_particle)
            else:
                elbo_particle[weight == 0] = 0.0

            elbo += torch_data_sum(weight * elbo_particle)

        loss = -elbo
        return loss
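For reference, the quantity accumulated above can be reproduced outside Pyro. Below is a minimal sketch in plain PyTorch (not Pyro's API) of a single-particle ELBO estimate, log p(z) + log p(x|z) - log q(z), for a toy Normal model; the names x and mu_q are made up for the illustration.

import torch

normal = torch.distributions.Normal
x = torch.tensor(0.5)        # observed datum (toy value)
mu_q = torch.tensor(0.2)     # guide mean (toy value)

z = normal(mu_q, 1.0).sample()                      # sample the latent from the guide
elbo_particle = (normal(0.0, 1.0).log_prob(z)       # log p(z)
                 + normal(z, 1.0).log_prob(x)       # + log p(x | z)
                 - normal(mu_q, 1.0).log_prob(z))   # - log q(z)
loss = -elbo_particle                               # loss = -ELBO, as in the snippet above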
Example #2
    def sample(self):
        """
        Returns a sample which has the same shape as `ps`, except that the last dimension
        will have the same size as the number of events.

        :return: sample from the OneHotCategorical distribution
        :rtype: torch.Tensor
        """
        sample = torch_multinomial(self.ps.data, 1,
                                   replacement=True).expand(*self.shape())
        sample_one_hot = torch_zeros_like(self.ps.data).scatter_(-1, sample, 1)
        return Variable(sample_one_hot)
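The multinomial-then-scatter trick used above can be reproduced with current PyTorch tensor ops. A standalone sketch (the snippet itself relies on Pyro's older torch_multinomial/torch_zeros_like helpers and Variable; the probabilities here are arbitrary):

import torch

ps = torch.tensor([0.1, 0.2, 0.7])
idx = torch.multinomial(ps, 1, replacement=True)         # draw a category index, e.g. tensor([2])
one_hot = torch.zeros_like(ps).scatter_(-1, idx, 1.0)    # one-hot encode it, e.g. tensor([0., 0., 1.])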
def test_batch_log_pdf(lognormal):
    dist_params = lognormal.get_dist_params(0)
    mu_lognorm = dist_params['mu']
    sigma_lognorm = dist_params['sigma']
    mu_z = torch_zeros_like(mu_lognorm)
    sigma_z = torch_ones_like(sigma_lognorm)
    trans_dist = get_transformed_dist(dist.normal, sigma_lognorm, mu_lognorm)
    test_data = lognormal.get_test_data(0)
    log_px_torch = trans_dist.batch_log_pdf(test_data, mu_z, sigma_z).data.cpu().numpy()
    log_px_np = sp.lognorm.logpdf(
        test_data.data.cpu().numpy(),
        sigma_lognorm.data.cpu().numpy(),
        scale=np.exp(mu_lognorm.data.cpu().numpy())).sum(-1, keepdims=True)
    assert_equal(log_px_torch, log_px_np, prec=1e-4)
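The test leans on a change-of-variables identity: a Normal(mu, sigma) pushed through exp has the log-density of scipy's lognorm with s=sigma and scale=exp(mu). A quick standalone check of that identity (the parameter values here are arbitrary):

import numpy as np
from scipy import stats

mu, sigma, x = 0.5, 0.8, 2.0
lhs = stats.lognorm.logpdf(x, sigma, scale=np.exp(mu))            # lognormal log-density
rhs = stats.norm.logpdf(np.log(x), loc=mu, scale=sigma) - np.log(x)  # Normal log-density + log |d log(x)/dx|
assert np.isclose(lhs, rhs)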
Example #4
    def batch_log_pdf(self, x):
        """
        Evaluates log probability densities for one or a batch of samples and parameters.
        The last dimension for `ps` encodes the event probabilities, and the remaining
        dimensions are considered batch dimensions.

        `ps` and `vs` are first broadcast to the size of the data `x`. The
        data tensor is used to create a mask over `vs` where a 1 in the mask
        indicates that the corresponding value in `vs` was selected. Since `ps`
        and `vs` have the same size, this mask, when applied over `ps`, gives
        the probabilities of the selected events. The method returns the logarithm
        of these probabilities.

        :return: tensor with log probabilities for each of the batches.
        :rtype: torch.autograd.Variable
        """
        logits = self.logits
        vs = self.vs
        x = self._process_data(x)
        batch_pdf_shape = self.batch_shape(x) + (1, )
        # probability tensor mask when data is numpy
        if isinstance(x, np.ndarray):
            batch_vs_size = x.shape[:-1] + (vs.shape[-1], )
            vs = np.broadcast_to(vs, batch_vs_size)
            boolean_mask = torch.from_numpy((vs == x).astype(int))
        # probability tensor mask when data is pytorch tensor
        else:
            x = x.cuda() if logits.is_cuda else x.cpu()
            batch_ps_shape = self.batch_shape(x) + self.event_shape()
            logits = logits.expand(batch_ps_shape)

            if vs is not None:
                vs = vs.expand(batch_ps_shape)
                boolean_mask = (vs == x)
            elif self.one_hot:
                boolean_mask = x
            else:
                boolean_mask = torch_zeros_like(logits.data).scatter_(
                    -1, x.data.long(), 1)
        boolean_mask = boolean_mask.cuda() if logits.is_cuda else boolean_mask.cpu()
        if not isinstance(boolean_mask, Variable):
            boolean_mask = Variable(boolean_mask)
        # apply log function to masked probability tensor
        batch_log_pdf = logits.masked_select(
            boolean_mask.byte()).contiguous().view(batch_pdf_shape)
        if self.log_pdf_mask is not None:
            batch_log_pdf = batch_log_pdf * self.log_pdf_mask
        return batch_log_pdf
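The masking step can be seen in isolation with current PyTorch ops. A minimal sketch (toy logits and data, not the class above) of building the one-hot boolean mask from the data and selecting the matching logits:

import torch

logits = torch.log(torch.tensor([[0.1, 0.9], [0.6, 0.4]]))    # batch of two 2-way categoricals
x = torch.tensor([[1], [0]])                                   # selected event per batch element
mask = torch.zeros_like(logits).scatter_(-1, x, 1.0).bool()    # one-hot mask over the event dimension
batch_log_pdf = logits.masked_select(mask).view(-1, 1)         # log-probabilities, shape (2, 1)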
def test_mean_and_var(lognormal):
    dist_params = lognormal.get_dist_params(0)
    mu_lognorm = dist_params['mu']
    sigma_lognorm = dist_params['sigma']
    mu_z = torch_zeros_like(mu_lognorm)
    sigma_z = torch_ones_like(sigma_lognorm)
    trans_dist = get_transformed_dist(dist.normal, sigma_lognorm, mu_lognorm)
    torch_samples = trans_dist.sample(mu_z, sigma_z, batch_size=lognormal.get_num_samples(0))
    torch_mean = torch.mean(torch_samples, 0)
    torch_std = torch.std(torch_samples, 0)
    analytic_mean = lognormal.pyro_dist.analytic_mean(**dist_params)
    analytic_std = lognormal.pyro_dist.analytic_var(**dist_params) ** 0.5
    precision = analytic_mean.max().data[0] * 0.05
    assert_equal(torch_mean, analytic_mean, prec=precision)
    assert_equal(torch_std, analytic_std, prec=precision)
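The analytic moments the test compares against are the standard lognormal formulas: for z ~ Normal(mu, sigma), exp(z) has mean exp(mu + sigma^2/2) and variance (exp(sigma^2) - 1) * exp(2*mu + sigma^2). A small numpy sanity check of those formulas (sample size and tolerance chosen arbitrarily):

import numpy as np

mu, sigma, n = 0.5, 0.3, 200_000
samples = np.exp(np.random.normal(mu, sigma, n))                        # lognormal samples
analytic_mean = np.exp(mu + sigma ** 2 / 2)
analytic_std = np.sqrt((np.exp(sigma ** 2) - 1) * np.exp(2 * mu + sigma ** 2))
assert abs(samples.mean() - analytic_mean) < 0.05 * analytic_mean
assert abs(samples.std() - analytic_std) < 0.05 * analytic_mean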
Example #6
    def sample(self):
        """
        Returns a sample which has the same shape as `ps` (or `vs`). The type
        of the sample is `numpy.ndarray` if `vs` is a list or a numpy array;
        otherwise a tensor is returned.

        :return: sample from the Categorical distribution
        :rtype: numpy.ndarray or torch.LongTensor
        """
        sample = torch_multinomial(self.ps.data, 1, replacement=True).expand(*self.shape())
        sample_one_hot = torch_zeros_like(self.ps.data).scatter_(-1, sample, 1)

        if self.vs is not None:
            if isinstance(self.vs, np.ndarray):
                sample_bool_index = sample_one_hot.cpu().numpy().astype(bool)
                return self.vs[sample_bool_index].reshape(*self.shape())
            else:
                return self.vs.masked_select(sample_one_hot.byte())
        return Variable(sample)
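The value lookup in the `vs` branch amounts to indexing the support values by the sampled category. A standalone sketch with current PyTorch (ps and vs here are made-up toy values):

import torch

ps = torch.tensor([0.2, 0.5, 0.3])       # event probabilities
vs = torch.tensor([10.0, 20.0, 30.0])    # support values
idx = torch.multinomial(ps, 1, replacement=True)   # e.g. tensor([1])
value = vs[idx]                                    # the sampled category's value, e.g. tensor([20.])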
Example #7
    def loss_and_grads(self, model, guide, *args, **kwargs):
        """
        :returns: an estimate of the ELBO
        :rtype: float

        Computes the ELBO as well as the surrogate ELBO that is used to form the gradient estimator.
        Performs backward on the latter. ``num_particles`` samples are used to form the estimators.
        """
        elbo = 0.0
        # grab a trace from the generator
        for weight, model_trace, guide_trace, log_r in self._get_traces(
                model, guide, *args, **kwargs):
            elbo_particle = weight * 0
            surrogate_elbo_particle = weight * 0
            # compute elbo and surrogate elbo
            log_pdf = "batch_log_pdf" if (
                self.enum_discrete and weight.size(0) > 1) else "log_pdf"
            for name in model_trace.nodes.keys():
                if model_trace.nodes[name]["type"] == "sample":
                    if model_trace.nodes[name]["is_observed"]:
                        elbo_particle += model_trace.nodes[name][log_pdf]
                        surrogate_elbo_particle += model_trace.nodes[name][
                            log_pdf]
                    else:
                        lp_lq = model_trace.nodes[name][
                            log_pdf] - guide_trace.nodes[name][log_pdf]
                        elbo_particle += lp_lq
                        if guide_trace.nodes[name]["fn"].reparameterized:
                            surrogate_elbo_particle += lp_lq
                        else:
                            # XXX should the user be able to control inclusion of the -logq term below?
                            surrogate_elbo_particle += model_trace.nodes[name][log_pdf] + \
                                log_r.detach() * guide_trace.nodes[name][log_pdf]

            # drop terms of weight zero to avoid nans
            if isinstance(weight, numbers.Number):
                if weight == 0.0:
                    elbo_particle = torch_zeros_like(elbo_particle)
                    surrogate_elbo_particle = torch_zeros_like(
                        surrogate_elbo_particle)
            else:
                weight_eq_zero = (weight == 0)
                elbo_particle[weight_eq_zero] = 0.0
                surrogate_elbo_particle[weight_eq_zero] = 0.0

            elbo += torch_data_sum(weight * elbo_particle)
            surrogate_elbo_particle = torch_sum(weight *
                                                surrogate_elbo_particle)

            # collect parameters to train from model and guide
            trainable_params = set(site["value"]
                                   for trace in (model_trace, guide_trace)
                                   for site in trace.nodes.values()
                                   if site["type"] == "param")

            if trainable_params:
                surrogate_loss_particle = -surrogate_elbo_particle
                torch_backward(surrogate_loss_particle)
                pyro.get_param_store().mark_params_active(trainable_params)

        loss = -elbo

        return loss
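The non-reparameterized branch above is a score-function (REINFORCE) surrogate: log_r is detached so that backpropagating through log_r.detach() * log q yields a reward-times-score gradient. A minimal sketch of that idea in plain PyTorch (not Pyro's API, and simplified to the score term only):

import torch

logits = torch.zeros(3, requires_grad=True)            # learnable guide parameters
q = torch.distributions.Categorical(logits=logits)
z = q.sample()                                          # non-reparameterized sample
log_q = q.log_prob(z)
log_r = z.float() - 1.0                                 # stand-in for log p(x, z) - log q(z)
surrogate_elbo = log_r.detach() * log_q                 # analogue of log_r.detach() * log_pdf above
(-surrogate_elbo).backward()                            # gradients now hold the score-function estimate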