Code example #1
import torch
from sbi.utils import torchutils  # batched_first_of_batch is provided by sbi's torchutils module


def test_batched_first_of_batch():
    # Selecting from a batch of ten identical rows keeps the batch dimension.
    t = torch.ones(10, 2)
    out_t = torchutils.batched_first_of_batch(t)
    assert (out_t == torch.ones(1, 2)).all()

    # A batch of size one is returned unchanged.
    t = torch.ones(1, 2)
    out_t = torchutils.batched_first_of_batch(t)
    assert (out_t == torch.ones(1, 2)).all()
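For reference, a minimal sketch of the behavior the test exercises. The real helper lives in sbi's torchutils module; this illustrative reimplementation only assumes what the assertions above show:

import torch

def batched_first_of_batch_sketch(t: torch.Tensor) -> torch.Tensor:
    # Keep the leading batch dimension while selecting the first element,
    # so a (10, 2) input yields a (1, 2) output and a (1, 2) input is unchanged.
    return t[:1]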
Code example #2
File: posterior.py  Project: www3cam/sbi
    def _log_prob_snpe(self, theta: Tensor, x: Tensor,
                       norm_posterior: bool) -> Tensor:
        r"""
        Return posterior log probability $p(\theta|x)$.

        The posterior probability is normalized only if explicitly requested,
        but it is always zeroed out (i.e. assigned -∞ log-prob) outside the
        prior support.
        """

        unnorm_log_prob = self.net.log_prob(theta, x)

        # Force probability to be zero outside prior support.
        is_prior_finite = torch.isfinite(self._prior.log_prob(theta))

        masked_log_prob = torch.where(
            is_prior_finite,
            unnorm_log_prob,
            torch.tensor(float("-inf"), dtype=torch.float32),
        )

        log_factor = (
            log(self.leakage_correction(x=batched_first_of_batch(x)))
            if norm_posterior
            else 0
        )

        return masked_log_prob - log_factor

    def log_prob(
        self,
        theta: Tensor,
        x: Optional[Tensor] = None,
        norm_posterior: bool = True,
        track_gradients: bool = False,
    ) -> Tensor:
        r"""
        Returns the log-probability of the posterior $p(\theta|x)$.

        Args:
            theta: Parameters $\theta$.
            x: Conditioning context for posterior $p(\theta|x)$. If not provided,
                fall back onto the `x_o` provided for multi-round training, or onto
                a default set later for convenience via `.set_default_x()`.
            norm_posterior: Whether to enforce a normalized posterior density.
                Renormalizing the posterior is useful when some probability leaks
                out of the prescribed prior support. The normalizing factor is
                calculated via rejection sampling, so if you need faster but
                unnormalized log posterior estimates, set `norm_posterior=False`.
                The returned log posterior is set to -∞ outside of the prior
                support regardless of this setting.
            track_gradients: Whether the returned tensor supports tracking gradients.
                This can be helpful for e.g. sensitivity analysis, but increases memory
                consumption.

        Returns:
            `(len(θ),)`-shaped log posterior probability $\log p(\theta|x)$ for θ in the
            support of the prior, -∞ (corresponding to 0 probability) outside.

        """

        # TODO Train exited here, entered after sampling?
        self.net.eval()
        # Run on the same device as theta (CPU if theta is not on a GPU).
        if theta.get_device() < 0:
            device = 'cpu'
        else:
            device = 'cuda:' + str(theta.get_device())
        theta, x = self._prepare_theta_and_x_for_log_prob_(theta, x)
        x = x.to(device)
        with torch.set_grad_enabled(track_gradients):
            unnorm_log_prob = self.net.log_prob(theta, x)

            # Force probability to be zero outside prior support.
            is_prior_finite = torch.isfinite(self._prior.log_prob(theta))

            masked_log_prob = torch.where(
                is_prior_finite,
                unnorm_log_prob,
                torch.tensor(float("-inf"), dtype=torch.float32).to(device),
            )

            log_factor = (
                log(self.leakage_correction(x=batched_first_of_batch(x)))
                if norm_posterior
                else 0
            )

            return masked_log_prob - log_factor
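A hypothetical usage sketch of the method above, assuming a trained `posterior` object from an sbi SNPE workflow and an observation `x_o` (both names are illustrative, not part of the code shown):

theta = posterior.sample((100,), x=x_o)
# Normalized log posterior; the leakage correction is estimated by rejection sampling.
log_probs = posterior.log_prob(theta, x=x_o, norm_posterior=True)
# Faster, unnormalized estimate; still -inf outside the prior support.
unnorm_log_probs = posterior.log_prob(theta, x=x_o, norm_posterior=False)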
Code example #4
File: direct_posterior.py  Project: jyotikab/sbi
    def log_prob(
        self,
        theta: Tensor,
        x: Optional[Tensor] = None,
        norm_posterior: bool = True,
        track_gradients: bool = False,
        leakage_correction_params: Optional[dict] = None,
    ) -> Tensor:
        r"""
        Returns the log-probability of the posterior $p(\theta|x)$.

        Args:
            theta: Parameters $\theta$.
            x: Conditioning context for posterior $p(\theta|x)$. If not provided,
                fall back onto `x` passed to `set_default_x()`.
            norm_posterior: Whether to enforce a normalized posterior density.
                Renormalizing the posterior is useful when some probability leaks
                out of the prescribed prior support. The normalizing factor is
                calculated via rejection sampling, so if you need faster but
                unnormalized log posterior estimates, set `norm_posterior=False`.
                The returned log posterior is set to -∞ outside of the prior
                support regardless of this setting.
            track_gradients: Whether the returned tensor supports tracking gradients.
                This can be helpful for e.g. sensitivity analysis, but increases memory
                consumption.
            leakage_correction_params: A `dict` of keyword arguments to override the
                default values of `leakage_correction()`. Possible options are:
                `num_rejection_samples`, `force_update`, `show_progress_bars`, and
                `rejection_sampling_batch_size`.
                These parameters only have an effect if `norm_posterior=True`.

        Returns:
            `(len(θ),)`-shaped log posterior probability $\log p(\theta|x)$ for θ in the
            support of the prior, -∞ (corresponding to 0 probability) outside.

        """

        # TODO Train exited here, entered after sampling?
        self.net.eval()

        theta, x = self._prepare_theta_and_x_for_log_prob_(theta, x)

        with torch.set_grad_enabled(track_gradients):

            # Evaluate on device, move back to cpu for comparison with prior.
            unnorm_log_prob = self.net.log_prob(theta.to(self._device),
                                                x.to(self._device)).cpu()

            # Force probability to be zero outside prior support.
            is_prior_finite = torch.isfinite(self._prior.log_prob(theta))

            masked_log_prob = torch.where(
                is_prior_finite,
                unnorm_log_prob,
                torch.tensor(float("-inf"), dtype=torch.float32),
            )

            if leakage_correction_params is None:
                leakage_correction_params = dict()  # use defaults
            log_factor = (
                log(self.leakage_correction(x=batched_first_of_batch(x),
                                            **leakage_correction_params))
                if norm_posterior
                else 0
            )

            return masked_log_prob - log_factor
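A hypothetical call illustrating the additional `leakage_correction_params` option, again assuming a trained `posterior` object and an observation `x_o`; the keyword names come from the docstring above:

log_probs = posterior.log_prob(
    theta,
    x=x_o,
    norm_posterior=True,
    # Override defaults of leakage_correction(); only used when norm_posterior=True.
    leakage_correction_params=dict(num_rejection_samples=5000,
                                   show_progress_bars=True),
)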