Example 1
    def _sample_and_perturb(
        self, particles: Tensor, weights: Tensor, num_samples: int = 1
    ) -> Tensor:
        """Sample and perturb batch of new parameters from trace.

        Reject sampled and perturbed parameters outside of prior.
        """

        num_accepted = 0
        parameters = []
        while num_accepted < num_samples:
            parms = self.sample_from_population_with_weights(
                particles, weights, num_samples=num_samples - num_accepted
            )

            # Create kernel on params and perturb.
            parms_perturbed = self.get_new_kernel(parms).sample()

            is_within_prior = within_support(self.prior, parms_perturbed)
            num_accepted += int(is_within_prior.sum().item())

            if num_accepted > 0:
                parameters.append(parms_perturbed[is_within_prior])

        return torch.cat(parameters)
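
For orientation, here is a minimal, self-contained sketch of the weighted resampling step that `sample_from_population_with_weights` performs inside this loop. The multinomial draw and the name `resample_with_weights` are illustrative assumptions, not the library's implementation.

import torch
from torch import Tensor


def resample_with_weights(particles: Tensor, weights: Tensor, num_samples: int) -> Tensor:
    """Draw particles proportionally to their normalized importance weights."""
    probs = weights / weights.sum()
    # Multinomial draw of particle indices, with replacement, as in SMC resampling.
    indices = torch.multinomial(probs, num_samples, replacement=True)
    return particles[indices]


# 100 two-dimensional particles with uniform weights.
new_particles = resample_with_weights(torch.randn(100, 2), torch.ones(100), num_samples=10)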
Example 2
    def np_potential(self, theta: np.ndarray) -> ScalarFloat:
        r"""Return posterior theta log prob. $p(\theta|x)$, $-\infty$ if outside prior."

        Args:
            theta: Parameters $\theta$, batch dimension 1.

        Returns:
            Posterior log probability $\log(p(\theta|x))$.
        """
        theta = torch.as_tensor(theta, dtype=torch.float32)
        theta = ensure_theta_batched(theta)
        num_batch = theta.shape[0]

        x_batched = ensure_x_batched(self.x)
        # Repeat x over batch dim to match theta batch, accounting for multi-D x.
        x_repeated = x_batched.repeat(num_batch,
                                      *(1 for _ in range(x_batched.ndim - 1)))

        with torch.set_grad_enabled(False):
            target_log_prob = self.posterior_nn.log_prob(
                inputs=theta.to(self.x.device),
                context=x_repeated,
            )
            in_prior_support = within_support(self.prior, theta)
            target_log_prob[~in_prior_support] = -float("Inf")

        return target_log_prob
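
The method above is a numpy-in, numpy-out bridge to a torch density estimator, which is what numpy-based samplers expect. A toy sketch of the same wrapping pattern, with a fixed Gaussian standing in for `posterior_nn` (all names here are illustrative):

import numpy as np
import torch
from torch.distributions import MultivariateNormal

# Stand-in for `posterior_nn.log_prob(inputs, context)`: a fixed 2-D Gaussian.
density = MultivariateNormal(torch.zeros(2), torch.eye(2))


def toy_np_potential(theta: np.ndarray) -> np.ndarray:
    """Accept numpy parameters, evaluate a torch log prob, return numpy again."""
    theta_t = torch.as_tensor(theta, dtype=torch.float32)
    if theta_t.ndim == 1:  # ensure a batch dimension, mirroring ensure_theta_batched
        theta_t = theta_t.unsqueeze(0)
    with torch.no_grad():
        return density.log_prob(theta_t).numpy()


print(toy_np_potential(np.zeros(2)))  # approx. [-1.8379]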
Example 3
    def pyro_potential(self,
                       theta: Dict[str, Tensor],
                       track_gradients: bool = False) -> Tensor:
        r"""Return posterior log prob. of theta $p(\theta|x)$, -inf where outside prior.

        Args:
            theta: Parameters $\theta$ (from pyro sampler).

        Returns:
            Posterior log probability $p(\theta|x)$, masked outside of prior.
        """

        theta = next(iter(theta.values()))

        with torch.set_grad_enabled(track_gradients):
            # Notice opposite sign to `posterior_potential`.
            # Move theta to device for evaluation.
            log_prob_posterior = -self.posterior_nn.log_prob(
                inputs=theta.to(self.device),
                context=self.x,
            ).cpu()

        in_prior_support = within_support(self.prior, theta)

        return torch.where(
            in_prior_support,
            log_prob_posterior,
            float("-inf") * torch.ones_like(log_prob_posterior),
        )
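
As the in-line comment notes, the returned value carries the opposite sign of a log probability, i.e. it is a Pyro-style potential. A standalone illustration of that sign convention with a plain torch distribution, unrelated to the library internals:

import torch
from torch.distributions import Normal

dist = Normal(0.0, 1.0)
theta = torch.tensor(0.5)

log_prob = dist.log_prob(theta)  # high where the density is high
potential = -log_prob            # low where the density is high (Pyro convention)
print(log_prob.item(), potential.item())  # approx. -1.0439 and 1.0439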
Example 4
def test_prior_wrappers(wrapper, prior, kwargs):
    """Test prior wrappers to pytorch distributions."""
    prior = wrapper(prior, **kwargs)

    # Use batch size 2 to test the minimal batched case (>1).
    batch_size = 2
    theta = prior.sample((batch_size,))
    assert isinstance(theta, Tensor)
    assert theta.shape[0] == batch_size

    # Test log prob on batch of thetas.
    log_probs = prior.log_prob(theta)
    assert isinstance(log_probs, Tensor)
    assert log_probs.shape[0] == batch_size

    # Test return type
    assert prior.sample().dtype == torch.float32

    # Test support check.
    within_support(prior, prior.sample((2,)))
    # Test transform
    mcmc_transform(prior)
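
To make the interface the test exercises concrete, here is a hypothetical minimal wrapper with batched `sample`/`log_prob` and float32 returns; it is not one of the library's actual prior wrappers.

import torch
from torch import Tensor


class CustomUniformPrior:
    """Hypothetical minimal prior exposing the interface the test above exercises."""

    def __init__(self, low: Tensor, high: Tensor) -> None:
        self.low, self.high = low.float(), high.float()

    def sample(self, sample_shape=torch.Size()) -> Tensor:
        shape = torch.Size(sample_shape) + self.low.shape
        return self.low + (self.high - self.low) * torch.rand(shape)

    def log_prob(self, theta: Tensor) -> Tensor:
        inside = ((theta >= self.low) & (theta <= self.high)).all(-1)
        log_volume = torch.log(self.high - self.low).sum()
        return torch.where(inside, -log_volume, torch.tensor(float("-inf")))


prior = CustomUniformPrior(torch.zeros(2), torch.ones(2))
theta = prior.sample((2,))
assert theta.shape == (2, 2) and prior.log_prob(theta).shape == (2,)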
Example 5
def samples_true_posterior_linear_gaussian_uniform_prior(
    x_o: Tensor,
    likelihood_shift: Tensor,
    likelihood_cov: Tensor,
    prior: Union[Uniform, Independent],
    num_samples: int = 1000,
) -> Tensor:
    """
    Returns ground truth posterior samples for Gaussian likelihood and uniform prior.

    Args:
        x_o: The observation.
        likelihood_shift: Mean of the likelihood p(x|theta) is likelihood_shift+theta.
        likelihood_cov: Covariance matrix of likelihood.
        prior: Uniform prior distribution.
        num_samples: Desired number of samples.

    Returns: Samples from posterior.
    """

    # Let s denote the likelihood_shift:
    # The likelihood has the term (x-(s+theta))^2 in the exponent of the Gaussian.
    # In other words, as a function of x, the mean of the likelihood is s+theta.
    # For computing the posterior we need the likelihood as a function of theta. Hence:
    # (x-(s+theta))^2 = (theta-(-s+x))^2
    # We see that the mean is -s+x = x-s

    # Take into account iid trials
    x_o = atleast_2d(x_o)
    num_trials, *_ = x_o.shape
    x_o_mean = x_o.mean(0)
    likelihood_mean = x_o_mean - likelihood_shift

    posterior = MultivariateNormal(loc=likelihood_mean,
                                   covariance_matrix=1 / num_trials *
                                   likelihood_cov)

    # generate samples from ND Gaussian truncated by prior support
    num_remaining = num_samples
    samples = []

    while num_remaining > 0:
        candidate_samples = posterior.sample(
            sample_shape=torch.Size((num_remaining, )))
        is_in_prior = within_support(prior, candidate_samples)
        # accept if in prior
        if is_in_prior.sum():
            samples.append(candidate_samples[is_in_prior, :])
            num_remaining -= int(is_in_prior.sum().item())

    return torch.cat(samples)
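
A hedged usage sketch of the helper defined above, assuming `BoxUniform` from `sbi.utils` and made-up likelihood parameters (the helper's own dependencies, e.g. `within_support` and `MultivariateNormal`, are assumed available):

import torch
from sbi.utils import BoxUniform

num_dim = 2
prior = BoxUniform(low=-torch.ones(num_dim), high=torch.ones(num_dim))
x_o = torch.zeros(1, num_dim)             # one observed trial
likelihood_shift = torch.zeros(num_dim)   # likelihood mean is theta + shift
likelihood_cov = 0.3 * torch.eye(num_dim)

true_samples = samples_true_posterior_linear_gaussian_uniform_prior(
    x_o, likelihood_shift, likelihood_cov, prior, num_samples=500
)
print(true_samples.shape)  # torch.Size([500, 2])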
Example 6
def get_prob_outside_uniform_prior(posterior: NeuralPosterior,
                                   prior: BoxUniform, num_dim: int) -> Tensor:
    """
    Return posterior probability for a parameter set outside of the prior support.

    Args:
        posterior: estimated posterior
        num_dim: dimensionality of the problem
    """
    # Test whether the posterior probability outside the prior support is zero.
    assert isinstance(prior, BoxUniform)
    sample_outside_support = 1.1 * prior.base_dist.low
    assert not within_support(
        prior,
        sample_outside_support).all(), "Samples must be outside of support."

    return torch.exp(posterior.log_prob(sample_outside_support))
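
Note that `1.1 * prior.base_dist.low` only lands outside the box because the lower bound is negative, so scaling pushes the point further below it. A quick standalone check of that construction, assuming `BoxUniform` from `sbi.utils`:

import torch
from sbi.utils import BoxUniform

prior = BoxUniform(low=-2.0 * torch.ones(3), high=2.0 * torch.ones(3))
point = 1.1 * prior.base_dist.low  # [-2.2, -2.2, -2.2], below the lower bound
outside = ((point < prior.base_dist.low) | (point > prior.base_dist.high)).any()
print(outside)  # tensor(True)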
Example 7
    num_sampled_total, num_remaining = 0, num_samples
    accepted, acceptance_rate = [], float("Nan")
    leakage_warning_raised = False

    # To cover cases with few samples without leakage:
    sampling_batch_size = min(num_samples, max_sampling_batch_size)
    while num_remaining > 0:

        # Sample and reject.
        candidates = posterior_nn.sample(sampling_batch_size,
                                         context=x).reshape(
                                             sampling_batch_size, -1)

        # SNPE-style rejection-sampling when the proposal is the neural net.
        are_within_prior = within_support(prior, candidates)
        samples = candidates[are_within_prior]

        accepted.append(samples)

        # Update.
        num_sampled_total += sampling_batch_size
        num_remaining -= samples.shape[0]
        pbar.update(samples.shape[0])

        # To avoid endless sampling when leakage is high, we raise a warning if the
        # acceptance rate is too low after the first 1_000 samples.
        acceptance_rate = (num_samples - num_remaining) / num_sampled_total

        # For remaining iterations (leakage or many samples) continue
        # sampling with fixed batch size, reduced in case the number
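
Stripped of the progress bar and the leakage warning, the loop above is plain accept/reject bookkeeping. A self-contained toy version with a Gaussian proposal and a box prior as stand-ins for `posterior_nn` and `prior`:

import torch
from torch.distributions import MultivariateNormal

proposal = MultivariateNormal(torch.zeros(2), torch.eye(2))  # stand-in for posterior_nn
low, high = -torch.ones(2), torch.ones(2)                    # box prior [-1, 1]^2

num_samples, batch_size = 1000, 500
num_sampled_total, num_remaining, accepted = 0, num_samples, []

while num_remaining > 0:
    candidates = proposal.sample((batch_size,))
    mask = ((candidates >= low) & (candidates <= high)).all(dim=1)
    accepted.append(candidates[mask])
    num_sampled_total += batch_size
    num_remaining -= int(mask.sum().item())
    acceptance_rate = (num_samples - num_remaining) / num_sampled_total

samples = torch.cat(accepted)[:num_samples]
print(samples.shape, round(acceptance_rate, 3))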
Example 8
    def log_prob(
        self,
        theta: Tensor,
        x: Optional[Tensor] = None,
        norm_posterior: bool = True,
        track_gradients: bool = False,
        leakage_correction_params: Optional[dict] = None,
    ) -> Tensor:
        r"""
        Returns the log-probability of the posterior $p(\theta|x)$.

        Args:
            theta: Parameters $\theta$.
            x: Conditioning context for posterior $p(\theta|x)$. If not provided,
                fall back onto `x` passed to `set_default_x()`.
            norm_posterior: Whether to enforce a normalized posterior density.
                Renormalization of the posterior is useful when some probability
                mass leaks out of the prescribed prior support. The normalizing
                factor is calculated via rejection sampling, so if you need
                speedier but unnormalized log posterior estimates, set
                `norm_posterior=False`. The returned log posterior is set to
                -∞ outside of the prior support regardless of this setting.
            track_gradients: Whether the returned tensor supports tracking gradients.
                This can be helpful for e.g. sensitivity analysis, but increases memory
                consumption.
            leakage_correction_params: A `dict` of keyword arguments to override the
                default values of `leakage_correction()`. Possible options are:
                `num_rejection_samples`, `force_update`, `show_progress_bars`, and
                `rejection_sampling_batch_size`.
                These parameters only have an effect if `norm_posterior=True`.

        Returns:
            `(len(θ),)`-shaped log posterior probability $\log p(\theta|x)$ for θ in the
            support of the prior, -∞ (corresponding to 0 probability) outside.

        """

        # TODO Train exited here, entered after sampling?
        self.net.eval()

        theta, x = self._prepare_theta_and_x_for_log_prob_(theta, x)

        with torch.set_grad_enabled(track_gradients):

            # Evaluate on device, move back to cpu for comparison with prior.
            unnorm_log_prob = self.net.log_prob(theta.to(self._device),
                                                x.to(self._device)).cpu()

            # Force probability to be zero outside prior support.
            in_prior_support = within_support(self._prior, theta)

            masked_log_prob = torch.where(
                in_prior_support,
                unnorm_log_prob,
                torch.tensor(float("-inf"), dtype=torch.float32),
            )

            if leakage_correction_params is None:
                leakage_correction_params = dict()  # use defaults
            log_factor = (log(
                self.leakage_correction(x=batched_first_of_batch(x),
                                        **leakage_correction_params))
                          if norm_posterior else 0)

            return masked_log_prob - log_factor
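
The `leakage_correction()` factor used when `norm_posterior=True` is, conceptually, the probability mass of the density estimator that falls inside the prior support, estimated by rejection sampling. A toy sketch of that correction with a Gaussian standing in for the estimator (not the library's implementation):

import torch
from torch.distributions import MultivariateNormal

q = MultivariateNormal(torch.zeros(2), 4.0 * torch.eye(2))  # stand-in for the estimator
low, high = -torch.ones(2), torch.ones(2)                   # box prior support

# Monte-Carlo estimate of the mass of q inside the prior (the acceptance rate).
draws = q.sample((100_000,))
inside = ((draws >= low) & (draws <= high)).all(dim=1)
acceptance = inside.float().mean()  # approx. 0.15 for this toy setup

theta = torch.zeros(1, 2)
normalized_log_prob = q.log_prob(theta) - torch.log(acceptance)
print(acceptance.item(), normalized_log_prob.item())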