# Example #1 (score: 0)
#!/usr/bin/env python

# Example of using nessai with `reparameterisations` dictionary. This example
# uses the same model as the half_gaussian example.

import numpy as np
from scipy.stats import norm

from nessai.flowsampler import FlowSampler
from nessai.model import Model
from nessai.utils import setup_logger

output = './outdir/reparameterisations_example/'
logger = setup_logger(output=output)


class HalfGaussian(Model):
    """Two-dimensional Gaussian with a bound at y=0."""
    def __init__(self):
        self.names = ['x', 'y']
        self.bounds = {'x': [-10, 10], 'y': [0, 10]}

    def log_prior(self, x):
        """Log-prior"""
        log_p = np.log(self.in_bounds(x))
        for bounds in self.bounds.values():
            log_p -= np.log(bounds[1] - bounds[0])
        return log_p

    def log_likelihood(self, x):
        """Log-likelihood"""
# Example #2 (score: 0)
    def run_sampler(self):
        """Run the nessai sampler and translate its output into ``self.result``.

        Builds a nessai ``Model`` wrapper around this sampler's priors and
        likelihood, runs ``FlowSampler``, then copies the posterior samples,
        nested samples, weights and evidence onto ``self.result``.

        Returns
        -------
        The populated ``self.result`` object.
        """
        from nessai.flowsampler import FlowSampler
        from nessai.model import Model as BaseModel
        from nessai.livepoint import dict_to_live_points, live_points_to_array
        from nessai.posterior import compute_weights
        from nessai.utils import setup_logger

        class Model(BaseModel):
            """A wrapper class to pass our log_likelihood and priors into nessai

            Parameters
            ----------
            names : list of str
                List of parameters to sample
            priors : :obj:`bilby.core.prior.PriorDict`
                Priors to use for sampling. Needed for the bounds and the
                `sample` method.
            """
            def __init__(self, names, priors):
                self.names = names
                self.priors = priors
                self._update_bounds()

            # NOTE(review): the two methods below are @staticmethod, so
            # ``self`` inside them is NOT the Model instance -- it resolves
            # via closure to the *outer* sampler object running
            # ``run_sampler``. Confusing but deliberate.
            @staticmethod
            def log_likelihood(x, **kwargs):
                """Compute the log likelihood"""
                theta = [x[n].item() for n in self.search_parameter_keys]
                return self.log_likelihood(theta)

            @staticmethod
            def log_prior(x, **kwargs):
                """Compute the log prior"""
                # NOTE(review): uses ``_search_parameter_keys`` while
                # log_likelihood uses ``search_parameter_keys`` -- presumably
                # equivalent attributes on the outer sampler; confirm.
                theta = {n: x[n] for n in self._search_parameter_keys}
                return self.log_prior(theta)

            def _update_bounds(self):
                # Derive nessai's {name: [min, max]} bounds from the priors.
                self.bounds = {
                    key: [self.priors[key].minimum, self.priors[key].maximum]
                    for key in self.names
                }

            def new_point(self, N=1):
                """Draw a point from the prior"""
                prior_samples = self.priors.sample(size=N)
                samples = {n: prior_samples[n] for n in self.names}
                return dict_to_live_points(samples)

            def new_point_log_prob(self, x):
                """Proposal probability for new the point"""
                return self.log_prior(x)

        # Setup the logger for nessai using the same settings as the bilby logger
        setup_logger(self.outdir,
                     label=self.label,
                     log_level=logger.getEffectiveLevel())
        model = Model(self.search_parameter_keys, self.priors)
        out = None
        # NOTE(review): the loop body either succeeds or re-raises, so this
        # ``while`` executes at most once; there is no actual retry.
        while out is None:
            try:
                out = FlowSampler(model, **self.kwargs)
            except TypeError as e:
                # Surface bad keyword arguments with extra context.
                raise TypeError(
                    "Unable to initialise nessai sampler with error: {}".
                    format(e))
        try:
            out.run(save=True, plot=self.plot)
        except SystemExit as e:
            # nessai may exit on checkpoint/interrupt; re-exit with our code.
            import sys
            logger.info("Caught exit code {}, exiting with signal {}".format(
                e.args[0], self.exit_code))
            sys.exit(self.exit_code)

        # Manually set likelihood evaluations because parallelisation breaks the counter
        self.result.num_likelihood_evaluations = out.ns.likelihood_evaluations[
            -1]

        # Convert nessai's structured live points into a plain array ordered
        # by the search parameter keys.
        self.result.samples = live_points_to_array(out.posterior_samples,
                                                   self.search_parameter_keys)
        self.result.log_likelihood_evaluations = out.posterior_samples['logL']
        # NOTE(review): ``DataFrame`` is presumably imported from pandas at
        # the top of the file -- not visible in this chunk; verify.
        self.result.nested_samples = DataFrame(out.nested_samples)
        # Rename nessai's columns to bilby's naming convention.
        self.result.nested_samples.rename(columns=dict(logL='log_likelihood',
                                                       logP='log_prior'),
                                          inplace=True)
        # Posterior weights for the nested samples from the nlive schedule.
        _, log_weights = compute_weights(
            np.array(self.result.nested_samples.log_likelihood),
            np.array(out.ns.state.nlive))
        self.result.nested_samples['weights'] = np.exp(log_weights)
        self.result.log_evidence = out.ns.log_evidence
        # Standard nested-sampling evidence error estimate: sqrt(H / nlive).
        self.result.log_evidence_err = np.sqrt(out.ns.information /
                                               out.ns.nlive)

        return self.result