Example #1
    def slice_np_mcmc(
        self,
        num_samples: int,
        potential_function: Callable,
        context: torch.Tensor,
        thin: int = 10,
        warmup_steps: int = 20,
    ) -> Tensor:

        # go into eval mode for evaluating during sampling
        # XXX set eval mode outside of calls to sample
        self.neural_net.eval()

        posterior_sampler = SliceSampler(
            utils.tensor2numpy(self._prior.sample((1,))).reshape(-1),
            lp_f=potential_function,
            thin=thin,
        )

        posterior_sampler.gen(warmup_steps)

        samples = posterior_sampler.gen(num_samples)

        # back to training mode
        # XXX train exited in log_prob, entered here?
        self.neural_net.train(True)

        return torch.tensor(samples, dtype=torch.float32)
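For orientation, here is a minimal, self-contained sketch of the SliceSampler interface as it is used above: the constructor takes a 1-D NumPy initial point, a log-probability callable lp_f, and a thinning factor, and gen(n) returns n thinned draws. The toy standard-normal target below is an assumption for illustration (with SliceSampler assumed importable from the slice_numpy module shown later), not part of the sbi code.

import numpy as np

# Toy target (illustrative assumption): 2-D standard normal.
def log_prob(theta: np.ndarray) -> float:
    return float(-0.5 * np.sum(theta ** 2))

sampler = SliceSampler(
    np.zeros(2),       # 1-D initial point, as in the examples above
    lp_f=log_prob,
    thin=5,
)
sampler.gen(20)              # warmup draws, discarded
samples = sampler.gen(1000)  # expected shape: (1000, 2)
print(samples.mean(axis=0))  # should be close to [0, 0]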
Example #2
File: slice_numpy.py, Project: bkmi/sbi
def run_slice_np(inits, seed):
    # Seed current job.
    np.random.seed(seed)
    posterior_sampler = SliceSampler(
        tensor2numpy(inits).reshape(-1),
        lp_f=potential_function,
        thin=thin,
        # Show pbars of workers only for single worker.
        verbose=show_progress_bars and num_workers == 1,
    )
    if warmup_steps > 0:
        posterior_sampler.gen(int(warmup_steps))
    return posterior_sampler.gen(ceil(num_samples / num_chains))
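run_slice_np seeds NumPy once per job so that parallel workers draw independent chains, and each call generates only that chain's share, ceil(num_samples / num_chains). A hypothetical driver (the names inits and num_chains and the seeding scheme are assumptions for illustration) might dispatch it sequentially like this:

# Hypothetical driver: one (init, seed) pair per chain.
seeds = np.random.randint(0, 2**31, size=num_chains)
all_samples = [
    run_slice_np(inits[c], int(seeds[c])) for c in range(num_chains)
]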
Example #3
    def _slice_np_mcmc(
        self,
        num_samples: int,
        potential_function: Callable,
        thin: int,
        warmup_steps: int,
    ) -> Tensor:
        """
        Custom implementation of slice sampling using NumPy.

        Args:
            num_samples: Desired number of samples.
            potential_function: A callable **class**.
            thin: Thinning (subsampling) factor.
            warmup_steps: Initial number of samples to discard.

        Returns: Tensor of shape (num_samples, shape_of_single_theta).
        """
        # Go into eval mode for evaluating during sampling
        self.net.eval()

        num_chains = self._mcmc_init_params.shape[0]
        dim_samples = self._mcmc_init_params.shape[1]

        all_samples = []
        for c in range(num_chains):
            posterior_sampler = SliceSampler(
                utils.tensor2numpy(self._mcmc_init_params[c, :]).reshape(-1),
                lp_f=potential_function,
                thin=thin,
            )
            if warmup_steps > 0:
                posterior_sampler.gen(int(warmup_steps))
            # ceil ensures the chains jointly yield at least num_samples.
            all_samples.append(
                posterior_sampler.gen(ceil(num_samples / num_chains)))
        all_samples = np.stack(all_samples).astype(np.float32)

        samples = torch.from_numpy(all_samples)  # chains x samples x dim

        # Final sample will be next init location
        self._mcmc_init_params = samples[:, -1, :].reshape(
            num_chains, dim_samples)

        samples = samples.reshape(-1, dim_samples)[:num_samples, :]
        assert samples.shape[0] == num_samples

        # Back to training mode
        self.net.train(True)

        return samples.type(torch.float32)
Example #4
    def _slice_np_mcmc(
        self,
        num_samples: int,
        potential_function: Callable,
        initial_params: Tensor,
        thin: int,
        warmup_steps: int,
    ) -> Tensor:
        """
        Custom implementation of slice sampling using NumPy.

        Args:
            num_samples: Desired number of samples.
            potential_function: A callable **class**.
            initial_params: Initial parameters for MCMC chain.
            thin: Thinning (subsampling) factor.
            warmup_steps: Initial number of samples to discard.

        Returns: Tensor of shape (num_samples, shape_of_single_theta).
        """

        num_chains = initial_params.shape[0]
        dim_samples = initial_params.shape[1]

        all_samples = []
        for c in range(num_chains):
            posterior_sampler = SliceSampler(
                utils.tensor2numpy(initial_params[c, :]).reshape(-1),
                lp_f=potential_function,
                thin=thin,
            )
            if warmup_steps > 0:
                posterior_sampler.gen(int(warmup_steps))
            # ceil ensures the chains jointly yield at least num_samples.
            all_samples.append(
                posterior_sampler.gen(ceil(num_samples / num_chains)))
        all_samples = np.stack(all_samples).astype(np.float32)

        samples = torch.from_numpy(all_samples)  # chains x samples x dim

        # Save sample as potential next init (if init_strategy == 'latest_sample').
        self._mcmc_init_params = samples[:, -1, :].reshape(
            num_chains, dim_samples)

        samples = samples.reshape(-1, dim_samples)[:num_samples, :]
        assert samples.shape[0] == num_samples

        return samples.type(torch.float32)
Example #5
File: slice_numpy.py, Project: bkmi/sbi
def run_slice_np_vectorized(inits, seed):
    # Seed current job.
    np.random.seed(seed)
    posterior_sampler = SliceSamplerVectorized(
        init_params=tensor2numpy(inits),
        log_prob_fn=potential_function,
        num_chains=inits.shape[0],
        # Show pbars of workers only for single worker.
        verbose=show_progress_bars and num_workers == 1,
    )
    warmup_ = warmup_steps * thin
    num_samples_ = ceil((num_samples * thin) / num_chains)
    samples = posterior_sampler.run(warmup_ + num_samples_)
    samples = samples[:, warmup_:, :]  # Discard warmup steps.
    samples = samples[:, ::thin, :]  # Thin chains.
    samples = torch.from_numpy(samples)  # chains x samples x dim
    return samples
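Unlike the sequential sampler, SliceSamplerVectorized applies warmup and thinning after the run, so the raw chain length is inflated by the thinning factor. Worked numbers (illustrative): with thin=10, warmup_steps=20, num_samples=1000, and num_chains=10, each chain runs warmup_ = 200 plus num_samples_ = ceil(1000 * 10 / 10) = 1000 raw steps; dropping the first 200 and keeping every 10th leaves 100 draws per chain, and the 10 chains together supply the requested 1000.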
Example #6
    def np_potential(self, theta: np.ndarray) -> ScalarFloat:
        r"""
        Return conditional posterior log-probability or $-\infty$ if outside prior.

        Args:
            theta: Free parameters $\theta_i$, batch dimension 1.

        Returns:
            Conditional posterior log-probability $\log(p(\theta_i|\theta_j, x))$,
            masked outside of prior.
        """
        theta = torch.as_tensor(theta, dtype=torch.float32)

        theta_condition = deepcopy(self.condition)
        theta_condition[:, self.dims_to_sample] = theta

        return self.potential_fn_provider.np_potential(
            utils.tensor2numpy(theta_condition))
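To make the substitution in np_potential concrete, here is a minimal NumPy sketch of the indexing it performs (the shapes and values are assumptions for illustration): the free entries listed in dims_to_sample are written into a copy of the full conditioning vector, and the completed vector is what the underlying potential evaluates.

import numpy as np

# Illustrative values (assumed): a 1 x 3 condition, sampling only dim 1.
condition = np.array([[1.0, 0.0, -2.0]])
dims_to_sample = [1]
theta = np.array([[3.5]])  # proposal for the single free dimension

full = condition.copy()
full[:, dims_to_sample] = theta
print(full)  # [[ 1.   3.5 -2. ]] -- theta_0 and theta_2 stay fixed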
Example #7
    def _slice_np_mcmc(
        self,
        num_samples: int,
        potential_function: Callable,
        initial_params: Tensor,
        thin: int,
        warmup_steps: int,
        vectorized: bool = False,
        show_progress_bars: bool = True,
    ) -> Tensor:
        """
        Custom implementation of slice sampling using NumPy.

        Args:
            num_samples: Desired number of samples.
            potential_function: A callable **class**.
            initial_params: Initial parameters for MCMC chain.
            thin: Thinning (subsampling) factor.
            warmup_steps: Initial number of samples to discard.
            vectorized: Whether to use a vectorized implementation of
                the Slice sampler (still experimental).
            show_progress_bars: Whether to show a progressbar during sampling;
                can only be turned off for vectorized sampler.

        Returns: Tensor of shape (num_samples, shape_of_single_theta).
        """
        num_chains = initial_params.shape[0]
        dim_samples = initial_params.shape[1]

        if not vectorized:  # Sample all chains sequentially
            all_samples = []
            for c in range(num_chains):
                posterior_sampler = SliceSampler(
                    utils.tensor2numpy(initial_params[c, :]).reshape(-1),
                    lp_f=potential_function,
                    thin=thin,
                    verbose=show_progress_bars,
                )
                if warmup_steps > 0:
                    posterior_sampler.gen(int(warmup_steps))
                all_samples.append(
                    posterior_sampler.gen(ceil(num_samples / num_chains))
                )
            all_samples = np.stack(all_samples).astype(np.float32)
            samples = torch.from_numpy(all_samples)  # chains x samples x dim
        else:  # Sample all chains at the same time
            posterior_sampler = SliceSamplerVectorized(
                init_params=utils.tensor2numpy(initial_params),
                log_prob_fn=potential_function,
                num_chains=num_chains,
                verbose=show_progress_bars,
            )
            warmup_ = warmup_steps * thin
            num_samples_ = ceil((num_samples * thin) / num_chains)
            samples = posterior_sampler.run(warmup_ + num_samples_)
            samples = samples[:, warmup_:, :]  # discard warmup steps
            samples = samples[:, ::thin, :]  # thin chains
            samples = torch.from_numpy(samples)  # chains x samples x dim

        # Save sample as potential next init (if init_strategy == 'latest_sample').
        self._mcmc_init_params = samples[:, -1, :].reshape(num_chains, dim_samples)

        samples = samples.reshape(-1, dim_samples)[:num_samples, :]
        assert samples.shape[0] == num_samples

        return samples.type(torch.float32)
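Example #7 merges the two strategies behind a single flag: the sequential path runs one SliceSampler per chain with thinning handled inside gen, while the vectorized path advances all chains in lockstep and discards warmup and thins afterwards; both paths flatten to (num_samples, dim) and cache the final state of each chain for the latest_sample init strategy. A hypothetical call (the surrounding object and potential_fn are assumptions for illustration) could look like:

samples = self._slice_np_mcmc(
    num_samples=1000,
    potential_function=potential_fn,
    initial_params=initial_params,  # shape: (num_chains, dim)
    thin=10,
    warmup_steps=20,
    vectorized=True,
)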
Example #8
def summarize(
    summary_writer,
    summary,
    round_,
    true_observation,
    parameter_bank,
    observation_bank,
    simulator,
    posterior_samples_acceptance_rate=None,
):
    # Get ground truth, if available.
    try:
        (
            _,
            prior,
            ground_truth_parameters,
            ground_truth_observation,
        ) = simulators.get_simulator_prior_and_groundtruth(simulator.name)
    except Exception:
        pass

    # Update summaries.
    try:
        mmd = utils.unbiased_mmd_squared(
            parameter_bank[-1],
            simulator.get_ground_truth_posterior_samples(num_samples=1000),
        )
        summary["mmds"].append(mmd.item())
    except Exception:
        pass

    try:
        # Median |x - x0| for most recent round.
        median_observation_distance = torch.median(
            torch.sqrt(
                torch.sum(
                    (observation_bank[-1] -
                     true_observation.reshape(1, -1))**2,
                    dim=-1,
                )))
        summary["median_observation_distances"].append(
            median_observation_distance.item())

        summary_writer.add_scalar(
            tag="median_observation_distance",
            scalar_value=summary["median_observation_distances"][-1],
            global_step=round_ + 1,
        )

    except Exception:
        pass

    try:
        # KDE estimate of negative log prob true parameters using
        # parameters from most recent round.

        negative_log_prob_true_parameters = -utils.gaussian_kde_log_eval(
            samples=parameter_bank[-1],
            query=ground_truth_parameters.reshape(1, -1),
        )
        summary["negative_log_probs_true_parameters"].append(
            negative_log_prob_true_parameters.item())

        summary_writer.add_scalar(
            tag="negative_log_prob_true_parameters",
            scalar_value=summary["negative_log_probs_true_parameters"][-1],
            global_step=round_ + 1,
        )
    except Exception:
        pass

    try:
        # Rejection sampling acceptance rate
        summary["rejection_sampling_acceptance-rates"].append(
            posterior_samples_acceptance_rate)

        summary_writer.add_scalar(
            tag="rejection_sampling_acceptance_rate",
            scalar_value=summary["rejection_sampling_acceptance_rates"][-1],
            global_step=round_ + 1,
        )
    except Exception:
        pass

    try:
        # Plot most recently sampled parameters.
        parameters = utils.tensor2numpy(parameter_bank[-1])
        figure = utils.plot_hist_marginals(
            data=parameters,
            ground_truth=utils.tensor2numpy(ground_truth_parameters).reshape(
                -1),
            lims=simulator.parameter_plotting_limits,
        )
        summary_writer.add_figure(tag="posterior_samples",
                                  figure=figure,
                                  global_step=round_ + 1)
    except Exception:
        pass

    # Write quantities using SummaryWriter.
    summary_writer.add_scalar(
        tag="epochs_trained",
        scalar_value=summary["epochs"][-1],
        global_step=round_ + 1,
    )

    summary_writer.add_scalar(
        tag="best_validation_log_prob",
        scalar_value=summary["best_validation_log_probs"][-1],
        global_step=round_ + 1,
    )

    if summary["mmds"]:
        summary_writer.add_scalar(
            tag="mmd",
            scalar_value=summary["mmds"][-1],
            global_step=round_ + 1,
        )

    summary_writer.flush()

    return summary_writer, summary
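Design note: summarize wraps each metric in its own try/except so that a missing ground truth, an absent summary key, or a failed plot silently skips that metric rather than aborting the round; only the always-available scalars (epochs_trained, best_validation_log_prob) are written unconditionally before the writer is flushed.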