Code example #1
File: snpe_a.py    Project: michaeldeistler/sbi
    def log_prob(self, inputs, context=None):
        if not self._apply_correction:
            return self._neural_net.log_prob(inputs, context)
        else:
            # When we want to compute the approx. posterior, a proposal prior \tilde{p}
            # has already been observed. To analytically calculate the log-prob of the
            # Gaussian, we first need to compute the mixture components.

            # Compute the mixture components of the proposal posterior.
            logits_pp, m_pp, prec_pp = self._posthoc_correction(context)

            # z-score theta if z-scoring had been requested.
            theta = self._maybe_z_score_theta(inputs)

            # Compute the log_prob of theta under the product.
            log_prob_proposal_posterior = utils.sbiutils.mog_log_prob(
                theta,
                logits_pp,
                m_pp,
                prec_pp,
            )
            utils.assert_all_finite(
                log_prob_proposal_posterior, "proposal posterior eval"
            )
            return log_prob_proposal_posterior  # \hat{p} from eq (3) in [1]
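
The heavy lifting above happens in utils.sbiutils.mog_log_prob, which evaluates theta under a mixture of Gaussians given logits, means, and precision matrices. As a reference for what such a routine has to compute, here is a minimal, self-contained sketch; the function name, argument names, and shape conventions are illustrative assumptions, not sbi's actual implementation.

import math
import torch

def mog_log_prob_sketch(theta, logits, means, precisions):
    """theta: (B, D), logits: (B, K), means: (B, K, D), precisions: (B, K, D, D)."""
    B, K, D = means.shape
    # Normalize the mixture weights in log space.
    log_w = logits - torch.logsumexp(logits, dim=-1, keepdim=True)        # (B, K)
    diff = theta.unsqueeze(1) - means                                     # (B, K, D)
    # Mahalanobis term (theta - m)^T P (theta - m) for every component.
    maha = torch.einsum("bki,bkij,bkj->bk", diff, precisions, diff)       # (B, K)
    # log N(theta; m, P^{-1}) = 0.5 * logdet(P) - D/2 * log(2*pi) - 0.5 * maha
    log_comp = 0.5 * torch.logdet(precisions) - 0.5 * D * math.log(2 * math.pi) - 0.5 * maha
    # Log-sum-exp over components gives the mixture log-density.
    return torch.logsumexp(log_w + log_comp, dim=-1)                      # (B,)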
Code example #2
File: snpe_c.py    Project: bkmi/sbi
    def _log_prob_proposal_posterior_mog(self, theta: Tensor, x: Tensor,
                                         proposal: DirectPosterior) -> Tensor:
        """Return log-probability of the proposal posterior for MoG proposal.

        For MoG proposals and MoG density estimators, this can be done in closed form
        and does not require atomic loss (i.e. there will be no leakage issues).

        Notation:

        m are mean vectors.
        prec are precision matrices.
        cov are covariance matrices.

        _p at the end indicates that it is the proposal.
        _d indicates that it is the density estimator.
        _pp indicates the proposal posterior.

        All tensors will have shapes (batch_dim, num_components, ...)

        Args:
            theta: Batch of parameters θ.
            x: Batch of data.
            proposal: Proposal distribution.

        Returns:
            Log-probability of the proposal posterior.
        """

        # Evaluate the proposal. MDNs do not have functionality to run the embedding_net
        # and then get the mixture_components (**without** calling log_prob()). Hence,
        # we call them separately here.
        encoded_x = proposal.posterior_estimator._embedding_net(
            proposal.default_x)
        dist = proposal.posterior_estimator._distribution  # defined to avoid ugly black formatting.
        logits_p, m_p, prec_p, _, _ = dist.get_mixture_components(encoded_x)
        norm_logits_p = logits_p - torch.logsumexp(
            logits_p, dim=-1, keepdim=True)

        # Evaluate the density estimator.
        encoded_x = self._neural_net._embedding_net(x)
        dist = self._neural_net._distribution  # defined to avoid black formatting.
        logits_d, m_d, prec_d, _, _ = dist.get_mixture_components(encoded_x)
        norm_logits_d = logits_d - torch.logsumexp(
            logits_d, dim=-1, keepdim=True)

        # z-score theta if z-scoring had been requested.
        theta = self._maybe_z_score_theta(theta)

        # Compute the MoG parameters of the proposal posterior.
        logits_pp, m_pp, prec_pp, cov_pp = self._automatic_posterior_transformation(
            norm_logits_p, m_p, prec_p, norm_logits_d, m_d, prec_d)

        # Compute the log_prob of theta under the product.
        log_prob_proposal_posterior = utils.mog_log_prob(
            theta, logits_pp, m_pp, prec_pp)
        utils.assert_all_finite(log_prob_proposal_posterior,
                                "proposal posterior eval")

        return log_prob_proposal_posterior
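
The closed-form step hidden inside _automatic_posterior_transformation rests on standard Gaussian identities, the most basic of which is: the product of two Gaussian densities is, up to a normalizing constant, again Gaussian, with the precisions adding and the mean being the precision-weighted combination of the two means. Applied component-wise across the two mixtures (with the per-component constants feeding into the new logits), this is what makes the MoG case tractable without atoms. A minimal sketch of the identity itself; the helper name and signature are illustrative, not part of sbi:

import torch

def product_of_gaussians(m1, prec1, m2, prec2):
    """m1, m2: (D,) means; prec1, prec2: (D, D) precision matrices."""
    prec = prec1 + prec2                                        # precisions add
    mean = torch.linalg.solve(prec, prec1 @ m1 + prec2 @ m2)    # prec^{-1} (P1 m1 + P2 m2)
    return mean, prec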
Code example #3
    def _log_prob_proposal_posterior(self, theta: Tensor, x: Tensor,
                                     masks: Tensor) -> Tensor:
        """
        Return importance-weighted log probability (Lueckmann, Goncalves et al., 2017).

        Args:
            theta: Batch of parameters θ.
            x: Batch of data.
            masks: Whether to retrain with prior loss (for each prior sample).

        Returns:
            Log probability of proposal posterior.
        """

        batch_size = theta.shape[0]

        # Evaluate posterior.
        log_prob_posterior = self._posterior.net.log_prob(theta, x)
        log_prob_posterior = log_prob_posterior.reshape(batch_size)
        utils.assert_all_finite(log_prob_posterior, "posterior eval")

        # Evaluate prior.
        log_prob_prior = self._prior.log_prob(theta).reshape(batch_size)
        utils.assert_all_finite(log_prob_prior, "prior eval")

        # Evaluate proposal.
        log_prob_proposal = self._model_bank[-1].net.log_prob(theta, x)
        utils.assert_all_finite(log_prob_proposal, "proposal posterior eval")

        # Compute log prob with importance weights.
        log_prob = torch.exp(log_prob_prior -
                             log_prob_proposal) * log_prob_posterior

        return log_prob
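
For context, the weighting above is the classic importance-weighted loss: each posterior log-probability is scaled by w = p(theta) / proposal(theta), the ratio of prior to proposal density at theta. A toy numeric sketch with stand-in 1-D Gaussians; all distributions and values are illustrative only, not taken from sbi:

import torch
from torch.distributions import Normal

prior = Normal(0.0, 2.0)        # stand-in for self._prior
proposal = Normal(1.0, 1.0)     # stand-in for the previous round's posterior net
theta = proposal.sample((5,))

log_prob_prior = prior.log_prob(theta)
log_prob_proposal = proposal.log_prob(theta)
log_prob_posterior = Normal(0.5, 1.5).log_prob(theta)  # stand-in for the current net

# Same weighting as in the method: w = p(theta) / proposal(theta).
weights = torch.exp(log_prob_prior - log_prob_proposal)
weighted_log_prob = weights * log_prob_posterior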
Code example #4
    def _log_prob_proposal_posterior_atomic(self, theta: Tensor, x: Tensor,
                                            masks: Tensor):
        """
        Return log probability of the proposal posterior for atomic proposals.

        We have two main options when evaluating the proposal posterior.
            (1) Generate atoms from the proposal prior.
            (2) Generate atoms from a more targeted distribution, such as the most
                recent posterior.
        If we choose the latter, it is likely beneficial not to do this in the first
        round, since we would be sampling from a randomly-initialized neural density
        estimator.

        Args:
            theta: Batch of parameters θ.
            x: Batch of data.
            masks: Mask that is True for prior samples in the batch in order to train
                them with prior loss.

        Returns:
            Log-probability of the proposal posterior.
        """

        batch_size = theta.shape[0]

        num_atoms = clamp_and_warn("num_atoms",
                                   self._num_atoms,
                                   min_val=2,
                                   max_val=batch_size)

        # Each set of parameter atoms is evaluated using the same x,
        # so we repeat rows of the data x, e.g. [1, 2] -> [1, 1, 2, 2]
        repeated_x = repeat_rows(x, num_atoms)

        # To generate the full set of atoms for a given item in the batch,
        # we sample without replacement num_atoms - 1 times from the rest
        # of the theta in the batch.
        probs = ones(batch_size,
                     batch_size) * (1 - eye(batch_size)) / (batch_size - 1)

        choices = torch.multinomial(probs,
                                    num_samples=num_atoms - 1,
                                    replacement=False)
        contrasting_theta = theta[choices]

        # We can now create our sets of atoms from the contrasting parameter sets
        # we have generated.
        atomic_theta = torch.cat((theta[:, None, :], contrasting_theta),
                                 dim=1).reshape(batch_size * num_atoms, -1)

        # Evaluate large batch giving (batch_size * num_atoms) log prob posterior evals.
        log_prob_posterior = self._neural_net.log_prob(atomic_theta,
                                                       repeated_x)
        utils.assert_all_finite(log_prob_posterior, "posterior eval")
        log_prob_posterior = log_prob_posterior.reshape(batch_size, num_atoms)

        # Get (batch_size * num_atoms) log prob prior evals.
        log_prob_prior = self._prior.log_prob(atomic_theta)
        log_prob_prior = log_prob_prior.reshape(batch_size, num_atoms)
        utils.assert_all_finite(log_prob_prior, "prior eval")

        # Compute unnormalized proposal posterior.
        unnormalized_log_prob = log_prob_posterior - log_prob_prior

        # Normalize proposal posterior across discrete set of atoms.
        log_prob_proposal_posterior = unnormalized_log_prob[:, 0] - torch.logsumexp(
            unnormalized_log_prob, dim=-1)
        utils.assert_all_finite(log_prob_proposal_posterior,
                                "proposal posterior eval")

        # XXX This evaluates the posterior on _all_ prior samples
        if self._use_combined_loss:
            log_prob_posterior_non_atomic = self._neural_net.log_prob(theta, x)
            masks = masks.reshape(-1)
            log_prob_proposal_posterior = (
                masks * log_prob_posterior_non_atomic +
                log_prob_proposal_posterior)

        return log_prob_proposal_posterior
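
To make the shape bookkeeping above concrete, here is a small stand-alone sketch of the atom construction using plain torch, so it runs outside sbi: repeat_rows is replaced by repeat_interleave, and all sizes are arbitrary illustrative values.

import torch

batch_size, num_atoms, dim_theta, dim_x = 4, 3, 2, 5
theta = torch.randn(batch_size, dim_theta)
x = torch.randn(batch_size, dim_x)

# Each item's atoms are evaluated against the same x: [x0, x1, ...] -> [x0, x0, x0, x1, ...].
repeated_x = x.repeat_interleave(num_atoms, dim=0)            # (batch_size * num_atoms, dim_x)

# Sample num_atoms - 1 contrasting thetas per item, excluding the item itself.
probs = torch.ones(batch_size, batch_size) * (1 - torch.eye(batch_size)) / (batch_size - 1)
choices = torch.multinomial(probs, num_samples=num_atoms - 1, replacement=False)
contrasting_theta = theta[choices]                            # (batch_size, num_atoms - 1, dim_theta)

# The first atom is the item's own theta, the rest are contrastive.
atomic_theta = torch.cat((theta[:, None, :], contrasting_theta), dim=1)
atomic_theta = atomic_theta.reshape(batch_size * num_atoms, dim_theta)
assert atomic_theta.shape[0] == repeated_x.shape[0]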