Example #1
 def run_sampler(self):
     import pypolychord
     from pypolychord.settings import PolyChordSettings
     if self.kwargs['use_polychord_defaults']:
         settings = PolyChordSettings(nDims=self.ndim,
                                      nDerived=self.ndim,
                                      base_dir=self._sample_file_directory,
                                      file_root=self.label)
     else:
         self._setup_dynamic_defaults()
         pc_kwargs = self.kwargs.copy()
         pc_kwargs['base_dir'] = self._sample_file_directory
         pc_kwargs['file_root'] = self.label
         pc_kwargs.pop('use_polychord_defaults')
         settings = PolyChordSettings(nDims=self.ndim,
                                      nDerived=self.ndim,
                                      **pc_kwargs)
     self._verify_kwargs_against_default_kwargs()
     out = pypolychord.run_polychord(loglikelihood=self.log_likelihood,
                                     nDims=self.ndim,
                                     nDerived=self.ndim,
                                     settings=settings,
                                     prior=self.prior_transform)
     self.result.log_evidence = out.logZ
     self.result.log_evidence_err = out.logZerr
     log_likelihoods, physical_parameters = self._read_sample_file()
     self.result.log_likelihood_evaluations = log_likelihoods
     self.result.samples = physical_parameters
     self.calc_likelihood_count()
     return self.result
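For reference, the bare pypolychord call that wrappers like this automate is short. Below is a minimal sketch assuming the classic `run_polychord(likelihood, nDims, nDerived, settings, prior)` interface; the Gaussian likelihood and 4-dimensional problem are illustrative:

import numpy as np
import pypolychord
from pypolychord.settings import PolyChordSettings
from pypolychord.priors import UniformPrior

nDims, nDerived = 4, 0

def likelihood(theta):
    # spherical Gaussian toy likelihood; the list holds derived parameters
    return float(-0.5 * np.dot(theta, theta)), []

def prior(hypercube):
    # map the unit hypercube onto the box [-5, 5]^nDims
    return UniformPrior(-5, 5)(hypercube)

settings = PolyChordSettings(nDims, nDerived)
settings.file_root = 'minimal'
settings.read_resume = False

output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings, prior)
print(output.logZ, output.logZerr)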
Example #2
    def run_sampler(self):
        import pypolychord
        from pypolychord.settings import PolyChordSettings
        if self.kwargs['use_polychord_defaults']:
            settings = PolyChordSettings(nDims=self.ndim,
                                         nDerived=self.ndim,
                                         base_dir=self.outdir,
                                         file_root=self.label)
        else:
            self._setup_dynamic_defaults()
            pc_kwargs = self.kwargs.copy()
            pc_kwargs['base_dir'] = self.outdir
            pc_kwargs['file_root'] = self.label
            pc_kwargs.pop('use_polychord_defaults')
            settings = PolyChordSettings(nDims=self.ndim,
                                         nDerived=self.ndim,
                                         **pc_kwargs)
        self._verify_kwargs_against_default_kwargs()

        pypolychord.run_polychord(loglikelihood=self.log_likelihood,
                                  nDims=self.ndim,
                                  nDerived=self.ndim,
                                  settings=settings,
                                  prior=self.prior_transform)

        self.result.log_evidence, self.result.log_evidence_err = \
            self._read_out_stats_file()
        self.result.samples = self._read_sample_file()
        return self.result
Example #3
 def __init__(self, mu, cov, file_root='untitled'):
     self.mu = mu if isinstance(mu, numpy.ndarray) else numpy.array(mu)
     self.cov = cov if isinstance(cov, numpy.ndarray) else numpy.array(cov)
     self.isDiagonal = False
     self.nDims = self.mu.size
     shape = self.cov.shape
     if (len(shape) > 2):
         raise ValueError(
             "Covariance matrix has higher rank than a 2-tensor. {}".format(
                 shape))
     if len(shape) == 2:
         a, b = shape
         if a != b:
             raise ValueError(
                 "Covariance matrix not square. It has shape {}x{}".format(
                     a, b))
         if a != self.mu.size:
             raise ValueError(
                 "Covariance matrix has different dimesnions to mean vector. {}x{} vs {}"
                 .format(a, b, self.mu.size))
     else:
         if self.cov.size != self.mu.size:
             raise ValueError(
                 "Incompatible standard deviation vector. [sigma]={}, [mu]={}"
                 .format(self.cov.size, self.mu.size))
         else:
             self.isDiagonal = True
     self.__invCov = numpy.linalg.inv(
         self.cov) if not self.isDiagonal else numpy.linalg.inv(
             numpy.diag(self.cov))
     self.settings = PolyChordSettings(self.nDims, 0)
     self.settings.file_root = file_root
     self.settings.read_resume = False
     self.settings.do_clustering = True
     self.settings.nlive = 20
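A usage sketch for the constructor above. The enclosing class name is not shown in the snippet, so `GaussianModel` here is a hypothetical stand-in:

model = GaussianModel(mu=[0.0, 1.0],             # hypothetical class name
                      cov=[[1.0, 0.6], [0.6, 1.0]],
                      file_root='gauss_demo')

# the 1-D branch: a vector is treated as the diagonal of the covariance
diag_model = GaussianModel(mu=[0.0, 1.0], cov=[1.0, 2.0])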
Example #4
    def __init__(self, sampled_parameters, loglikelihood, population_size):
        """Initialize the PolyChord Nested Sampler."""
        self.sampled_parameters = sampled_parameters
        self.loglikelihood = loglikelihood
        self.population_size = population_size

        self._nDims = len(sampled_parameters)
        self._nDerived = 0
        self._post_eval = False
        self._posteriors = None
        #if self.population_size is None:
        #    self.population_size = 25*self._nDims
        # make the likelihood function for polychord
        def likelihood(theta):
            r2 = 0
            return loglikelihood(theta), [r2]
        self._likelihood = likelihood
        # make the prior for polychord
        def prior(hypercube):
            return np.array([self.sampled_parameters[i].invcdf(value) for i,value in enumerate(hypercube)])

        self._prior = prior
        # PolyChord settings object
        self._settings = PolyChordSettings(self._nDims, self._nDerived,
                                           nlive=self.population_size)
        self._settings.file_root = 'polychord_run' #string
        self._settings.do_clustering = True
        self._settings.read_resume = False
        # Make the polychord dumper function
        # param : array, array, array, float, float
        def dumper(live, dead, logweights, logZ, logZerr):
            print("Last dead point:", dead[-1]) # prints last element of dead (wich is an array)

        self._dumper = dumper
        return
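A hedged usage sketch for the constructor above. The enclosing class name is not shown, so `PolyChordNestedSampling` is hypothetical; each sampled parameter only needs the `invcdf` method that the prior closure calls:

from scipy.stats import norm

class NormalParameter:                  # hypothetical stand-in
    def invcdf(self, u):
        return norm.ppf(u)              # unit-normal prior transform

params = [NormalParameter() for _ in range(3)]
sampler = PolyChordNestedSampling(      # hypothetical class name
    sampled_parameters=params,
    loglikelihood=lambda theta: -0.5 * float(theta @ theta),
    population_size=100)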
Example #5
 def __init__(self, mu, sigma, b, a, file_root=''):
     # super(RepartitionedGaussianModel, self).__init__(self, mu, cov)
     self.mu = mu
     if not isinstance(self.mu, numpy.ndarray):
         self.mu = numpy.array(self.mu)
     self.sigma = sigma
     if not isinstance(self.sigma, numpy.ndarray):
         self.sigma = numpy.array(self.sigma)
     if len(self.sigma.shape) != 1:
         raise ValueError(
             "Standard deviations not provided as a vector, maybe you meant to use CorrelatedGaussianModel? {}"
             .format(sigma))
     else:
         if self.mu.size != self.sigma.size:
             raise ValueError(
                 "Incompatible sizes of standard deviation and mean vectors. [mu]={} vs [sigma]={}"
                 .format(self.mu.size, self.sigma.size))
     self.nDims = self.mu.size
     self.settings = PolyChordSettings(self.nDims, 0)
     self.settings.file_root = file_root
     self.settings.read_resume = False
     self.settings.do_clustering = True
     self.settings.nlive = 20
     self.upper_bounds = b
     self.lower_bounds = a
Example #6
    def _get_sampler(self, **kwargs):
        if "file_root" in kwargs:
            warnings.warn("file_root was defined in sampler_kwargs, "
                          "but is replaced by output_prefix")
            del kwargs["file_root"]

        if "base_dir" in kwargs:
            warnings.warn("base_dir was defined in sampler_kwargs, "
                          "but is replaced by output_prefix")
            del kwargs["base_dir"]

        return PolyChordSettings(
            self.nparams,
            self.nderived,
            base_dir=str(self.output_dir),
            file_root=str(self.output_file_prefix),
            nlive=kwargs.pop("nlive", 100 * self.nparams),
            **kwargs,
        )
    def __init__(self, mu, cov, a, b, **kwargs):
        self.mu = numpy.array(mu)
        self.cov = numpy.array(cov)
        self.invCov = numpy.linalg.inv(cov)
        self.sigma = numpy.diag(self.cov)
        self.gaussian_norm = numpy.linalg.slogdet(
            2 * numpy.pi * self.cov)[1] / 2
        self.nDims = self.mu.size

        def unkwarg(arg, default):
            return kwargs.get(arg, default)

        self.settings = PolyChordSettings(self.nDims, 0)
        self.settings.file_root = unkwarg('file_root', 'test')
        self.settings.nlive = unkwarg('live_points', 10**3)
        self.settings.read_resume = unkwarg('read_resume', False)
        self.settings.do_clustering = unkwarg('do_clustering', True)
        self.settings.feedback = unkwarg('feedback', 0)
        self.a = a
        self.b = b
    def run_polychord(num_live_points):
        try:
            import pypolychord
            from pypolychord.settings import PolyChordSettings
            from pypolychord.priors import UniformPrior
        except ImportError:
            raise ImportError("Polychord not installed.\nRun `git clone https://github.com/PolyChord/PolyChordLite.git \ncd PolyChordLite\npython setup.py install`.")

        def likelihood(theta):
            """ Simple Gaussian Likelihood"""
            nDims = len(theta)
            r2 = sum(theta**2)
            logL = -log(2*pi*sigma*sigma)*nDims/2.0
            logL += -r2/2/sigma/sigma
            return logL, [r2]


        def prior(hypercube):
            """ Uniform prior from [-1,1]^D. """
            return UniformPrior(-1, 1)(hypercube)


        def dumper(live, dead, logweights, logZ, logZerr):
            return
            # print("Last dead point:", dead[-1])

        settings = PolyChordSettings(ndims, 1)
        settings.file_root = 'gaussian'
        settings.nlive = num_live_points
        settings.do_clustering = True
        settings.read_resume = False

        t0 = default_timer()
        output = pypolychord.run_polychord(likelihood, ndims, 1, settings, prior, dumper)
        run_time = default_timer() - t0
        print("polychord log(Z):", output.logZ)
        return run_time
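The benchmark above relies on module-level names (`ndims`, `sigma`, `log`, `pi`, `default_timer`) defined outside the snippet; a sketch of that surrounding context, with illustrative values:

from math import log, pi
from timeit import default_timer

ndims = 4      # dimensionality of the toy Gaussian (illustrative)
sigma = 0.1    # its width (illustrative)

run_time = run_polychord(num_live_points=100)
print("wall time: {:.1f} s".format(run_time))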
Example #9
 def initialize(self):
     """Imports the PolyChord sampler and prepares its arguments."""
     if am_single_or_primary_process():  # rank = 0 (MPI master) or None (no MPI)
         self.log.info("Initializing")
     # If path not given, try using general path to modules
     if not self.path and self.path_install:
         self.path = get_path(self.path_install)
     if self.path:
         if am_single_or_primary_process():
             self.log.info("Importing *local* PolyChord from " + self.path)
             if not os.path.exists(os.path.realpath(self.path)):
                 raise LoggedError(
                     self.log, "The given path does not exist. "
                     "Try installing PolyChord with "
                     "'cobaya-install polychord -m [modules_path]")
         pc_build_path = get_build_path(self.path)
         if not pc_build_path:
             raise LoggedError(
                 self.log, "Either PolyChord is not in the given folder, "
                 "'%s', or you have not compiled it.", self.path)
         # Inserting the previously found path into the list of import folders
         sys.path.insert(0, pc_build_path)
     else:
         self.log.info("Importing *global* PolyChord.")
     try:
         import pypolychord
         from pypolychord.settings import PolyChordSettings
         self.pc = pypolychord
     except ImportError:
         raise LoggedError(
             self.log, "Couldn't find the PolyChord python interface. "
             "Make sure that you have compiled it, and that you either\n"
             " (a) specify a path (you didn't) or\n"
             " (b) install the Python interface globally with\n"
             "     '/path/to/PolyChord/python setup.py install --user'")
     # Prepare arguments and settings
     self.nDims = self.model.prior.d()
     self.nDerived = (len(self.model.parameterization.derived_params()) +
                      len(self.model.prior) +
                      len(self.model.likelihood._likelihoods))
     if self.logzero is None:
         self.logzero = np.nan_to_num(-np.inf)
     if self.max_ndead == np.inf:
         self.max_ndead = -1
     for p in ["nlive", "nprior", "max_ndead"]:
         setattr(self, p,
                 read_dnumber(getattr(self, p), self.nDims, dtype=int))
     # Fill the automatic ones
     if getattr(self, "feedback", None) is None:
         values = {
             logging.CRITICAL: 0,
             logging.ERROR: 0,
             logging.WARNING: 0,
             logging.INFO: 1,
             logging.DEBUG: 2
         }
         self.feedback = values[self.log.getEffectiveLevel()]
     try:
         output_folder = getattr(self.output, "folder")
         output_prefix = getattr(self.output, "prefix") or ""
         self.read_resume = self.resuming
     except AttributeError:
         # dummy output -- no resume!
         self.read_resume = False
         from tempfile import gettempdir
         output_folder = gettempdir()
         if am_single_or_primary_process():
             from random import random
             output_prefix = hex(int(random() * 16**6))[2:]
         else:
             output_prefix = None
         if more_than_one_process():
             output_prefix = get_mpi_comm().bcast(output_prefix, root=0)
     self.base_dir = os.path.join(output_folder, self.base_dir)
     self.file_root = output_prefix
     if am_single_or_primary_process():
         # Creating output folder, if it does not exist (just one process)
         if not os.path.exists(self.base_dir):
             os.makedirs(self.base_dir)
         # Idem, a clusters folder if needed -- notice that PolyChord's default
         # is "True", here "None", hence the funny condition below
         if self.do_clustering is not False:  # None here means "default"
             try:
                 os.makedirs(os.path.join(self.base_dir, clusters))
             except OSError:  # exists!
                 pass
         self.log.info("Storing raw PolyChord output in '%s'.",
                       self.base_dir)
     # Exploiting the speed hierarchy
     if self.blocking:
         speeds, blocks = self.model.likelihood._check_speeds_of_params(
             self.blocking)
     else:
         speeds, blocks = self.model.likelihood._speeds_of_params(
             int_speeds=True)
     blocks_flat = list(chain(*blocks))
     self.ordering = [
         blocks_flat.index(p)
         for p in self.model.parameterization.sampled_params()
     ]
     self.grade_dims = np.array([len(block) for block in blocks])
     # bugfix: pypolychord's C interface for Fortran does not like int numpy types
     self.grade_dims = [int(x) for x in self.grade_dims]
     # Steps per block
     # NB: num_repeats is ignored by PolyChord when int "grade_frac" given,
     # so needs to be applied by hand.
     # Make sure that speeds are integer, and that the slowest is 1,
     # for a straightforward application of num_repeats
     speeds = relative_to_int(speeds, 1)
     # In num_repeats, `d` is interpreted as dimension of each block
     self.grade_frac = [
         int(speed * read_dnumber(self.num_repeats, dim_block))
         for speed, dim_block in zip(speeds, self.grade_dims)
     ]
     # Assign settings
     pc_args = [
         "nlive", "num_repeats", "nprior", "do_clustering",
         "precision_criterion", "max_ndead", "boost_posterior", "feedback",
         "logzero", "posteriors", "equals", "compression_factor",
         "cluster_posteriors", "write_resume", "read_resume", "write_stats",
         "write_live", "write_dead", "base_dir", "file_root", "grade_frac",
         "grade_dims"
     ]
     # As stated above, num_repeats is ignored, so let's not pass it
     pc_args.pop(pc_args.index("num_repeats"))
     self.pc_settings = PolyChordSettings(
         self.nDims,
         self.nDerived,
         seed=(self.seed if self.seed is not None else -1),
         **{
             p: getattr(self, p)
             for p in pc_args if getattr(self, p) is not None
         })
     # prior conversion from the hypercube
     bounds = self.model.prior.bounds(
         confidence_for_unbounded=self.confidence_for_unbounded)
     # Check if priors are bounded (nan's to inf)
     inf = np.where(np.isinf(bounds))
     if len(inf[0]):
         params_names = self.model.parameterization.sampled_params()
         params = [params_names[i] for i in sorted(list(set(inf[0])))]
         raise LoggedError(
             self.log,
             "PolyChord needs bounded priors, but the parameter(s) '" +
             "', '".join(params) + "' is(are) unbounded.")
     locs = bounds[:, 0]
     scales = bounds[:, 1] - bounds[:, 0]
     # This function re-scales the parameters AND puts them in the right order
     self.pc_prior = lambda x: (locs + np.array(x)[self.ordering] * scales
                                ).tolist()
     # We will need the volume of the prior domain, since PolyChord divides by it
     self.logvolume = np.log(np.prod(scales))
     # Prepare callback function
     if self.callback_function is not None:
         self.callback_function_callable = (get_external_function(
             self.callback_function))
     self.last_point_callback = 0
     # Prepare runtime live and dead points collections
     self.live = Collection(self.model,
                            None,
                            name="live",
                            initial_size=self.pc_settings.nlive)
     self.dead = Collection(self.model, self.output, name="dead")
     self.n_sampled = len(self.model.parameterization.sampled_params())
     self.n_derived = len(self.model.parameterization.derived_params())
     self.n_priors = len(self.model.prior)
     self.n_likes = len(self.model.likelihood._likelihoods)
     # Done!
     if am_single_or_primary_process():
         self.log.info("Calling PolyChord with arguments:")
         for p, v in inspect.getmembers(self.pc_settings,
                                        lambda a: not (callable(a))):
             if not p.startswith("_"):
                 self.log.info("  %s: %s", p, v)
Example #11
    return logL, [r2]  # float, array-like


# param : array
def prior(hypercube):
    """ Uniform prior from [-1,1]^D. """
    return UniformPrior(-1, 1)(hypercube)  # array


# param : array, array, array, float, float
def dumper(live, dead, logweights, logZ, logZerr):
    print("Last dead point:",
          dead[-1])  # prints last element of dead (which is an array)


settings = PolyChordSettings(nDims, nDerived)
settings.file_root = functionName
settings.do_clustering = True
settings.read_resume = False

output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings,
                                   prior, dumper)
paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(nDims)]
paramnames += [('r*', 'r')]
output.make_paramnames_files(paramnames)

try:
    import getdist.plots
    import matplotlib.pyplot as plt
    posterior = output.posterior
    g = getdist.plots.getSubplotPlotter()
Example #12
def pc(test_statistic,
       transform,
       n_dim,
       observed,
       n_live=100,
       base_dir="chains/",
       file_root="pc_",
       do_clustering=False,
       resume=False,
       ev_data=False,
       feedback=0,
       **kwargs):
    """
    Nested sampling with PC
    """
    # copy key word arguments to settings object
    settings = PolyChordSettings(n_dim, 0, **kwargs)
    settings.nfail = n_live
    settings.precision_criterion = 0.
    settings.read_resume = resume
    settings.base_dir = base_dir
    settings.file_root = file_root
    settings.nlive = n_live
    settings.logLstop = observed
    settings.do_clustering = do_clustering
    settings.feedback = feedback

    loglike = pc_wrap(test_statistic)
    output = pypolychord.run_polychord(loglike, n_dim, 0, settings, transform,
                                       dumper(0, observed))

    # get number of calls directly
    calls = output.nlike

    # get log X from resume file

    label = "=== local volume -- log(<X_p>) ==="
    log_xp = None
    res_name = "{}/{}.resume".format(base_dir, file_root)

    with open(res_name) as res_file:
        for line in res_file:
            if line.strip() == label:
                next_line = res_file.readline()
                log_xp = np.array([float(e) for e in next_line.split()])
                break
        else:
            raise RuntimeError("didn't find {}".format(label))

    log_x = logsumexp(log_xp)
    n_iter = -log_x * n_live

    if not ev_data:
        return Result.from_ns(n_iter, n_live, calls)

    # get ev data
    ev_name = "{}/{}_dead.txt".format(base_dir, file_root)
    ev_data = np.genfromtxt(ev_name)
    test_statistic = ev_data[:, 0]
    log_x = -np.arange(0, len(test_statistic), 1.) / n_live
    log_x_delta = np.sqrt(-log_x / n_live)

    return Result.from_ns(n_iter, n_live,
                          calls), [test_statistic, log_x, log_x_delta]
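The helper above also depends on module-level `pc_wrap`, `dumper`, `Result`, `logsumexp`, and `np`; an illustrative call (argument values and callables hypothetical) could look like:

result = pc(test_statistic=my_test_statistic,  # hypothetical callable returning the statistic
            transform=my_transform,            # hypothetical hypercube-to-parameter map
            n_dim=5,
            observed=20.0,                     # sampling stops once logL reaches this value
            n_live=100)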
Example #13
from pypolychord.settings import PolyChordSettings
import anesthetic
from scipy.special import erfinv
import time

# In this module I'm going to investigate the effect of the so-called
# Lasenby parameter as well as the effects of proper prior
# repartitioning, and benchmark to see a speedup.

# Vanilla

mu = numpy.array([1.0, 2.5])
cov = numpy.array([[1.0, 0.6], [0.6, 1.0]])
nDims = mu.size

settings = PolyChordSettings(nDims, 0)
settings.file_root = 'vanilla'
settings.nlive = 10**3
settings.read_resume = False
settings.do_clustering = True
settings.feedback = 0


def gaussian_likelihood(theta):
    invSig = numpy.linalg.inv(cov)
    norm = numpy.linalg.slogdet(2 * numpy.pi * cov)[1] / 2
    logL = -norm - (theta - mu) @ invSig @ (theta - mu) / 2
    return logL, []


def uniform_prior(point_in_hypercube):
Example #14
    def PC_fit(self, nlive_const='auto', dynamic=True, dynamic_goal=1.0,
               ninit=100, basename='dypc_chains', verbose=True, plot=False):
        '''
        Parameters
        ----------
        dynamic_goal : float, opt
            Parameter in [0,1] determining whether the algorithm prioritizes
            evidence accuracy (goal near 0) or parameter estimation (goal near 1).
        ninit : int, opt
            Number of live points to use in initial exploratory run. 
        nlive_const : int, opt
            Total computational budget, equivalent to non-dynamic nested sampling with nlive_const live points.
        dynamic : bool, opt
            If True, use dynamic nested sampling via dyPolyChord. Otherwise, use the
            standard PolyChord.
        basename : str, opt
            Location in which chains will be stored. 
        verbose : bool, opt
            If True, text will be output on the terminal to check how run is proceeding. 
        plot : bool, opt
            Display some sample plots to check result of dynamic slice nested sampling. 
        '''
        if dynamic:
            print('Dynamic slice nested sampling')
        else:
            print('Slice nested sampling')
            
        # obtain maximum likelihood fits
        theta0 = self.lineModel.guessFit()
        self.result_ml = self.optimizeFit(theta0)
        self.theta_ml = self.result_ml['x']

        # save theta_ml also in the lineModel object,
        # so that constraints may be set based on ML result
        self.lineModel.theta_ml = self.theta_ml
        
        # save moments obtained from maximum likelihood optimization
        self.m_ml = self.lineModel.modelMoments(self.theta_ml)
        
        # dimensionality of the problem
        self.ndim = int(self.lineModel.thetaLength())

        if dynamic:
            # dyPolyChord (dynamic slice nested sampling)
            # ------------------------------
            try:
                import dyPolyChord.pypolychord_utils
                import dyPolyChord
            except ImportError:
                print("********************")
                print("Could not import dyPolyChord! Make sure that this is in your PYTHONPATH.")
                print("PolyChord must also be on your LD_LIBRARY_PATH")
                raise ValueError("Abort BSFC fit")
        
            #Make a callable for running dyPolyChord
            my_callable = dyPolyChord.pypolychord_utils.RunPyPolyChord(
                self.PC_loglike,
                self.lineModel.hypercube_lnprior_generalized_simplex,
                self.ndim
            )
        
            # Specify sampler settings (see run_dynamic_ns.py documentation for more details)
            settings_dict = {'file_root': 'bsfc',
                             'base_dir': basename,
                             'seed': 1}

            # Run dyPolyChord
            MPI_parallel=True
            if MPI_parallel:
                from mpi4py import MPI
                comm = MPI.COMM_WORLD
                dyPolyChord.run_dypolychord(my_callable, dynamic_goal, settings_dict,
                                            ninit=ninit,
                                            nlive_const=int(25*self.ndim) if nlive_const=='auto' else nlive_const,
                                            comm=comm)
            else:
                dyPolyChord.run_dypolychord(my_callable, dynamic_goal, settings_dict,
                                            ninit=ninit,
                                            nlive_const=int(25*self.ndim) if nlive_const=='auto' else nlive_const)
                
        else:
            # PolyChord (slice nested sampling)
            # ------------------------------
            try:
                import pypolychord
                from pypolychord.settings import PolyChordSettings
            except ImportError:
                print("********************")
                print("Could not import pypolychord! Make sure that this is in your PYTHONPATH.")
                raise ValueError("Abort BSFC fit")
            
            nDerived=0
            settings = PolyChordSettings(self.ndim, nDerived)
            settings.file_root = 'bsfc'
            settings.base_dir = basename
            settings.nlive = int(25*self.ndim) if nlive_const=='auto' else int(nlive_const)
            #settings.do_clustering = True
            #settings.read_resume = False
            settings.feedback = 3
            
            def dumper(live, dead, logweights, logZ, logZerr):
                #print("Last dead point:", dead[-1])
                print("logZ = "+str(logZ)+"+/-"+str(logZerr))
                
            self.polychord_output = pypolychord.run_polychord(self.PC_loglike,
                                               self.ndim,
                                               nDerived,
                                               settings,
                                               self.lineModel.hypercube_lnprior_generalized_simplex,
                                               dumper)

        self.good=True
Example #15
    def get_polychord_settings(polychord_setup, num_params, num_derived):
        """Extract polychord settings and create the settings object.

        Parameters
        ----------
        polychord_setup : ConfigParser
            Polychord section from the main config
        num_params : int
            Number of sampled parameters
        num_derived : int
            Number of derived parameters

        Returns
        -------
        PolyChordSettings
            Settings object for running Polychord
        """
        # Seed and path/name
        seed = polychord_setup.getint('seed', int(0))
        path = os.path.expandvars(polychord_setup.get('path'))
        name = polychord_setup.get('name')

        # The key config parameters
        num_live = polychord_setup.getint('num_live', int(25 * num_params))
        num_repeats = polychord_setup.getint('num_repeats',
                                             int(5 * num_params))
        precision = polychord_setup.getfloat('precision', float(0.001))

        # Resume should almost always be true
        resume = polychord_setup.getboolean('resume', True)
        write_dead = polychord_setup.getboolean('write_dead', True)

        # Useful for plotting as it gives you more posterior samples
        boost_posterior = polychord_setup.getfloat('boost_posterior',
                                                   float(0.0))

        # Do we do clustering, useful for multimodal distributions
        do_clustering = polychord_setup.getboolean('do_clustering', False)
        cluster_posteriors = polychord_setup.getboolean(
            'cluster_posteriors', False)

        # Perform maximisation at the end of the chain
        maximise = polychord_setup.getboolean('maximise', False)

        # These control different sampling speeds
        # grade_frac : List[float]
        #    (Default: [1])
        #    The amount of time to spend in each speed.
        #    If any of grade_frac are <= 1, then polychord will time each
        #    sub-speed, and then choose num_repeats for the number of slowest
        #    repeats, and spend the proportion of time indicated by grade_frac.
        #    Otherwise this indicates the number of repeats to spend in
        #    each speed.
        # grade_dims : List[int]
        #     (Default: nDims)
        #     The number of parameters within each speed.

        # Initialize the settings
        settings = PolyChordSettings(num_params,
                                     num_derived,
                                     base_dir=path,
                                     file_root=name,
                                     seed=seed,
                                     nlive=num_live,
                                     num_repeats=num_repeats,
                                     precision_criterion=precision,
                                     write_resume=resume,
                                     read_resume=resume,
                                     boost_posterior=boost_posterior,
                                     do_clustering=do_clustering,
                                     cluster_posteriors=cluster_posteriors,
                                     equals=False,
                                     write_dead=write_dead,
                                     maximise=maximise,
                                     write_live=False,
                                     write_prior=False)

        # Check the path and get the paramnames path
        output_path = Path(path)
        err_msg = ("The PolyChord 'path' does not correspond to an existing"
                   " folder. Create the output folder before running.")
        assert output_path.exists(), err_msg
        parnames_path = output_path / (name + '.paramnames')

        return settings, parnames_path
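To make the `grade_frac`/`grade_dims` comments above concrete, a small sketch splitting a 10-parameter model into a fast and a slow block (the numbers are illustrative):

from pypolychord.settings import PolyChordSettings

settings = PolyChordSettings(10, 0)
settings.grade_dims = [8, 2]        # 8 fast parameters, then 2 slow ones
settings.grade_frac = [0.7, 0.3]    # values <= 1: fraction of time per speed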
Example #16
    # return UniformPrior(-5, 5)(cube)


def log_likelihood(theta):
    x = theta[:-1]
    beta = theta[-1:].item()
    norm = numpy.log(numpy.prod(numpy.diagonal(numpy.pi * 2 * cov))) / 2
    gaussian = -norm - numpy.sum(((x - mu_pi) / sig_prior)**2) / 2
    # return (2-beta)*(gaussian) + numpy.log(zed(beta)), []
    # print(gaussian, log_zed(beta), beta)
    return (2 - beta) * gaussian + log_zed(beta), []
    # return gaussian, []
    # return -numpy.linalg.slogdet(cov)[1]/2 - (x - mu_pi)@ numpy.linalg.inv(cov)@(x - mu_pi)/2, []


settings = PolyChordSettings(nDims + 1, 0)
settings.file_root = 'ppr'
settings.nlive = 200
settings.do_clustering = True
settings.read_resume = False


def exe():
    run_polychord(log_likelihood, nDims + 1, 0, settings, prior)


samples = anesthetic.NestedSamples(root='./chains/ppr')
# floating point overflow:
# zed(0.5) = -inf
Example #17
def run_polychord(loglikelihood, prior, dumper, nDims, nlive, root, ndump,
                  num_repeats):
    """Run PolyChord.

    See https://arxiv.org/abs/1506.00171 for more detail

    Parameters
    ----------
    loglikelihood: :obj:`callable`
        probability function taking a single parameter:

        - theta: numpy.array
                 physical parameters, `shape=(nDims,)`

        returning a log-likelihood (float)

    prior: :obj:`callable`
        transformation function taking a single parameter

        - cube: numpy.array
                hypercube parameters, `shape=(nDims,)`

        returning physical parameters (`numpy.array`)

    dumper: :obj:`callable`
        access function called every nlive iterations giving a window onto
        current live points. Single parameter, no return:

        - live:
               `numpy.array of` live parameters and loglikelihoods,
               `shape=(nlive,nDims+1)`

    nDims: int
        Dimensionality of sampling space

    nlive: int
        Number of live points

    root: str
        base name for output files

    ndump: int
        How many iterations between dumper function calls

    num_repeats: int
        Length of chain to generate new live points

    """
    import pypolychord
    from pypolychord.settings import PolyChordSettings

    nDerived = 0
    settings = PolyChordSettings(nDims, nDerived)
    settings.base_dir = os.path.dirname(root)
    settings.file_root = os.path.basename(root)
    settings.nlive = nlive
    settings.num_repeats = num_repeats
    settings.do_clustering = True
    settings.read_resume = False
    settings.compression_factor = numpy.exp(-float(ndump)/nlive)
    settings.precision_criterion = 0.01

    def polychord_loglikelihood(theta):
        return loglikelihood(theta), []

    def polychord_dumper(live, dead, logweights, logZ, logZerr):
        dumper(live[:, :-2], live[:, -1], dead[:, :-2], dead[:, -1])

    pypolychord.run_polychord(polychord_loglikelihood, nDims, nDerived,
                              settings, prior, polychord_dumper)
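A hedged usage sketch of the wrapper above, with a toy Gaussian likelihood and a simple prior transform (all names here are illustrative):

import numpy

def loglikelihood(theta):
    return -0.5 * float(theta @ theta)

def prior(cube):
    return 10 * numpy.array(cube) - 5          # [0,1]^D -> [-5,5]^D

def dumper(live_params, live_logls, dead_params, dead_logls):
    print("current best logL:", live_logls.max())

run_polychord(loglikelihood, prior, dumper, nDims=4, nlive=100,
              root='chains/toy', ndump=100, num_repeats=20)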
Example #18
 def __init__(self, dimensionality, number_derived, file_root='', **kwargs):
     self.settings = PolyChordSettings(dimensionality, number_derived)
     self.settings.file_root = file_root
Example #19
    def polychord_sampler(self):
        import pypolychord
        from pypolychord.settings import PolyChordSettings
        from pypolychord.priors import UniformPrior, GaussianPrior

        ndim = len(self.params.p0)
        nder = 0

        # Log-likelihood compliant with PolyChord's input
        def likelihood(theta):
            return self.lnlike(theta), [0]

        def prior(hypercube):
            prior = []
            for h, pr in zip(hypercube, self.params.p_free_priors):
                if pr[1] == 'Gaussian':
                    prior.append(
                        GaussianPrior(float(pr[2][0]), float(pr[2][1]))(h))
                else:
                    prior.append(
                        UniformPrior(float(pr[2][0]), float(pr[2][2]))(h))
            return prior

        # Optional dumper function giving run-time read access to
        # the live points, dead points, weights and evidences
        def dumper(live, dead, logweights, logZ, logZerr):
            print("Last dead point:", dead[-1])

        settings = PolyChordSettings(ndim, nder)
        settings.base_dir = self.get_output(
            'param_chains')[:-4]  # Remove ".npz"
        settings.file_root = 'pch'
        settings.nlive = self.config['nlive']
        settings.num_repeats = self.config['nrepeat']
        settings.do_clustering = False  # Assume unimodal posterior
        settings.boost_posterior = 10  # Increase number of posterior samples
        settings.nprior = 200  # Draw nprior initial prior samples
        settings.maximise = True  # Maximize posterior at the end
        settings.read_resume = False  # Read from resume file of earlier run
        settings.feedback = 2  # Verbosity {0,1,2,3}

        output = pypolychord.run_polychord(likelihood, ndim, nder, settings,
                                           prior, dumper)

        return output
Example #20
curlimit = resource.getrlimit(
    resource.RLIMIT_STACK)  # get current stack resource size
resource.setrlimit(
    resource.RLIMIT_STACK,
    (resource.RLIM_INFINITY, resource.RLIM_INFINITY))  # set to unlimited

# setup run settings using the PolyChordSetting class
pargs = {
    'nlive': nlive,
    'precision_criterion': tol,
    'base_dir': basedir,
    'file_root': fileroot,
    'write_resume': False,  # don't output a resume file
    'read_resume': False,   # don't read a resume file
}
settings = PolyChordSettings(ndims, nderived, **pargs)

# run nested sampling
output = pypolychord.run_polychord(loglikelihood_polychord, ndims, nderived,
                                   settings, prior_transform_polychord)

# reset stack resource size
resource.setrlimit(resource.RLIMIT_STACK, curlimit)

# output marginal likelihood
print('Marginalised evidence is {} ± {}'.format(output.logZ, output.logZerr))

# plot posterior samples (if corner.py is installed)
try:
    import matplotlib as mpl
    mpl.use("Agg")  # force Matplotlib backend to Agg
Example #21
def prior(hypercube):
    """ Uniform prior from [-1,1]^D. """
    return UniformPrior(-1, 1)(hypercube)


#| Optional dumper function giving run-time read access to
#| the live points, dead points, weights and evidences


def dumper(live, dead, logweights, logZ, logZerr):
    print("Last dead point:", dead[-1])


#| Initialise the settings

settings = PolyChordSettings(nDims, nDerived)
settings.file_root = 'gaussian'
settings.nlive = 200
settings.do_clustering = True
settings.read_resume = False

#| Run PolyChord

output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings,
                                   prior, dumper)

#| Create a paramnames file

paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(nDims)]
paramnames += [('r*', 'r')]
output.make_paramnames_files(paramnames)
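A common follow-up (a sketch, assuming getdist and matplotlib are installed, mirroring the plotting code in the other examples here) is a triangle plot of the posterior:

import getdist.plots
import matplotlib.pyplot as plt

posterior = output.posterior                 # getdist samples, available after make_paramnames_files
g = getdist.plots.getSubplotPlotter()
g.triangle_plot(posterior, filled=True)
plt.savefig('gaussian_posterior.pdf')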
Example #22
from pypolychord.settings import PolyChordSettings
import matplotlib.pyplot as plt
import pypolychord as ppc
from pypolychord.priors import UniformPrior


def quantile(cube):
    return UniformPrior(-10, 10)(cube)


def lnL(theta):
    # return a scalar log-likelihood and an empty derived-parameter list
    return float(theta @ theta), []


settings = PolyChordSettings(2, 0)
ppc.run_polychord(lnL, 2, 0, settings, quantile)
Example #23
def set_polysettings(rundict, polysettings, ndim, nderived, isodate, parnames):
    """ 
    Sets the correct settings for polychord and returns the PolyChordSettings 
    object.

    Parameters
    ----------
    rundict : dict
        Dictionary with information about the run itself.
    polysettings : dict
        Dictionary with the custom PolyChord settings to be set for this run
    ndim : int
        Number of free parameters
    nderived : int
        Number of derived parameters
    isodate : datetime
        Date stamp to identify the current run
    parnames : list
        List containing the names of the free parameters

    Returns
    -------
    settings : PolyChordSettings object
        Object with all the information that PolyChord needs to run nested
        sampling on this model.
    """

    rundict_keys = list(rundict.keys())
    # Define PolyChord settings
    # Use the settings provided in polysettings otherwise use default
    # Definition of default values for PolyChordSettings
    default_settings = {'nlive': 25*ndim,
                        'num_repeats': 5*ndim,
                        'do_clustering': True,
                        'write_resume': False,
                        'read_resume': False,
                        'feedback': 1,
                        'precision_criterion': 0.001,
                        'boost_posterior': 0.0
                        }

    # Update default values with settings provided by user
    if polysettings is not None:
        if not isinstance(polysettings, dict):
            raise TypeError("polysettings has to be a dictionary")
        else:
            # Validate the types of the user-provided settings
            expected_types = {'nlive': int,
                              'num_repeats': int,
                              'do_clustering': bool,
                              'read_resume': bool,
                              'precision_criterion': float}
            type_names = {int: 'an integer', bool: 'a boolean', float: 'a float'}
            for setting, expected in expected_types.items():
                if setting in polysettings:
                    if type(polysettings[setting]) is not expected:
                        raise TypeError(
                            f'{setting} has to be {type_names[expected]} '
                            f'(got type {type(polysettings[setting])})')

            default_settings.update(polysettings)

    # Define fileroot name (identifies this specific run)
    rundict['target'] = rundict['target'].replace(
        ' ', '')  # Remove any whitespace
    rundict['runid'] = rundict['runid'].replace(
        ' ', '')  # Remove any whitespace
    file_root = rundict['target']+'_'+rundict['runid']

    # Add comment if it exists and is not empty
    if 'comment' in rundict_keys:
        if rundict['comment'] != '':
            file_root += '-' + rundict['comment']

    # Add number of planets if it exists
    if 'nplanets' in rundict_keys:
        if rundict['nplanets'] is not None:
            file_root += f'_k{rundict["nplanets"]}'

    # Check if there is a drift and add "d{n}" to file name
    drift_order = 0
    for par in parnames:
        if 'drift' in par:
            if par[6:] in ['lin', 'quad', 'cub', 'quar']:
                drift_order += 1
    if drift_order > 0:
        file_root += f'_d{drift_order}'

    # Label the run with nr of planets, live points, nr of cores, sampler and date
    file_root += f'_nlive{default_settings["nlive"]}'
    file_root += f'_ncores{size}'
    file_root += '_polychord'
    file_root += '_'+isodate

    # Base directory
    # Check if a save directory was provided
    if 'save_dir' in rundict_keys:
        save_dir = rundict['save_dir']
    else:
        save_dir = ''
    base_dir = os.path.join(save_dir, file_root, 'polychains')

    # Update settings dictionary with fileroot and basedir
    default_settings.update({'file_root': file_root, 'base_dir': base_dir})

    # Create PolyChordSettings object and assign settings
    settings = PolyChordSettings(ndim, nderived, **default_settings)
    # TODO Think about how to implement resumes

    return settings
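A hedged usage sketch for `set_polysettings`; the `rundict` keys are inferred from how the function reads them, and `size` (the MPI core count used in the file root) is assumed to exist at module level:

import datetime

rundict = {'target': 'star1',      # hypothetical target name
           'runid': 'test01',
           'save_dir': '/tmp'}
polysettings = {'nlive': 500, 'num_repeats': 10}
isodate = datetime.datetime.now().isoformat()

settings = set_polysettings(rundict, polysettings, ndim=2, nderived=0,
                            isodate=isodate, parnames=['period', 'amplitude'])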
Example #24
    def run(self):
        """
        Executes the inference
        """
        if self.prepare():
            # Setup the inference
            ndim = np.sum(self.pstep > 0)
            settings = PolyChordSettings(ndim, 0)
            settings.base_dir = self.outputdir
            settings.file_root = self.fprefix
            settings.nlive = self.nlive
            settings.read_resume = self.resume
            if self.nrepeat is not None:
                settings.num_repeats = self.nrepeat
            settings.precision_criterion = self.dlogz
            settings.grade_dims = [int(ndim)]
            settings.feedback = self.verb
            settings.feedback = self.verb
            # Run it
            if self.dumper is not None:
                out = pypolychord.run_polychord(self.loglike, ndim, 0,
                                                settings, self.prior,
                                                self.dumper)
            else:
                out = pypolychord.run_polychord(self.loglike, ndim, 0,
                                                settings, self.prior)

            outp = np.loadtxt(os.path.join(self.outputdir, self.fprefix) +\
                                   '_equal_weights.txt')
            self.outp = outp[:, 2:].T
            ibest = np.argmin(outp[:, 1])
            self.bestp = self.outp[:, ibest]
            # Save posterior and bestfit params
            if self.fsavefile is not None:
                np.save(self.fsavefile, self.outp)
            if self.fbestp is not None:
                np.save(self.fbestp, self.bestp)
            return self.outp, self.bestp
        else:
            if self.verb:
                print("Sampler is not fully prepared to run. " + \
                      "Correct the above errors and try again.")
Example #25
File: sampler.py Project: suksien/picca
    def run(self):
        '''
        Run Polychord

        We need to pass 3 functions:
        log_lik - compute likelihood for a parameter set theta
        prior - defines the box prior
        dumper - extracts info during runtime - empty for now
        '''
        par_names = {name: name for d in self.data for name in d.pars_init}
        val_dict = {
            name: val
            for d in self.data for name, val in d.pars_init.items()
        }
        lim_dict = {
            name: lim
            for d in self.data for name, lim in d.par_limit.items()
        }
        fix_dict = {
            name: fix
            for d in self.data for name, fix in d.par_fixed.items()
        }

        # Select the parameters we sample
        sampled_pars_ind = np.array(
            [i for i, val in enumerate(fix_dict.values()) if not val])
        npar = len(sampled_pars_ind)
        nder = 0

        # Get the limits for the free params
        limits = np.array(
            [list(lim_dict.values())[i] for i in sampled_pars_ind])
        names = np.array(
            [list(par_names.values())[i] for i in sampled_pars_ind])

        def log_lik(theta):
            ''' Wrapper for likelihood function passed to Polychord '''
            pars = val_dict.copy()
            for i, name in enumerate(names):
                pars[name] = theta[i]

            log_lik = self.log_lik(pars)
            return log_lik, []

        def prior(hypercube):
            ''' Uniform prior '''
            prior = []
            for i, lims in enumerate(limits):
                prior.append(UniformPrior(lims[0], lims[1])(hypercube[i]))
            return prior

        def dumper(live, dead, logweights, logZ, logZerr):
            ''' Dumper function empty for now '''
            pass

        # Get the settings we need and add defaults
        # These are the same as PolyChord recommends
        nlive = self.polychord_setup.getint('nlive', int(25 * npar))
        seed = self.polychord_setup.getint('seed', int(0))
        num_repeats = self.polychord_setup.getint('num_repeats', int(5 * npar))
        precision = self.polychord_setup.getfloat('precision', float(0.001))
        boost_posterior = self.polychord_setup.getfloat(
            'boost_posterior', float(0.0))
        resume = self.polychord_setup.getboolean('resume', True)
        cluster_posteriors = self.polychord_setup.getboolean(
            'cluster_posteriors', False)
        do_clustering = self.polychord_setup.getboolean('do_clustering', False)
        path = self.polychord_setup.get('path')
        filename = self.polychord_setup.get('name')
        write_live = self.polychord_setup.getboolean('write_live', False)
        write_dead = self.polychord_setup.getboolean('write_dead', True)
        write_prior = self.polychord_setup.getboolean('write_prior', False)

        # Initialize and run PolyChord
        settings = PolyChordSettings(npar,
                                     nder,
                                     base_dir=path,
                                     file_root=filename,
                                     seed=seed,
                                     nlive=nlive,
                                     precision_criterion=precision,
                                     num_repeats=num_repeats,
                                     boost_posterior=boost_posterior,
                                     cluster_posteriors=cluster_posteriors,
                                     do_clustering=do_clustering,
                                     equals=False,
                                     write_resume=resume,
                                     read_resume=resume,
                                     write_live=write_live,
                                     write_dead=write_dead,
                                     write_prior=write_prior)
        pypolychord.run_polychord(log_lik, npar, nder, settings, prior, dumper)
Example #26
 def initialize(self):
     """Imports the PolyChord sampler and prepares its arguments."""
     if not get_mpi_rank():  # rank = 0 (MPI master) or None (no MPI)
         self.log.info("Initializing")
     # If path not given, try using general path to modules
     if not self.path and self.path_install:
         self.path = get_path(self.path_install)
     if self.path:
         if not get_mpi_rank():
             self.log.info("Importing *local* PolyChord from " + self.path)
             if not os.path.exists(os.path.realpath(self.path)):
                 self.log.error("The given path does not exist.")
                 raise HandledException
         pc_build_path = get_build_path(self.path)
         if not pc_build_path:
             self.log.error("Either PolyChord is not in the given folder, "
                            "'%s', or you have not compiled it.", self.path)
             raise HandledException
         # Inserting the previously found path into the list of import folders
         sys.path.insert(0, pc_build_path)
     else:
         self.log.info("Importing *global* PolyChord.")
     try:
         import pypolychord
         from pypolychord.settings import PolyChordSettings
         self.pc = pypolychord
     except ImportError:
         self.log.error(
             "Couldn't find the PolyChord python interface. "
             "Make sure that you have compiled it, and that you either\n"
             " (a) specify a path (you didn't) or\n"
             " (b) install the Python interface globally with\n"
             "     '/path/to/PolyChord/python setup.py install --user'")
         raise HandledException
     # Prepare arguments and settings
     self.nDims = self.model.prior.d()
     self.nDerived = (len(self.model.parameterization.derived_params()) +
                      len(self.model.prior) + len(self.model.likelihood._likelihoods))
     if self.logzero is None:
         self.logzero = np.nan_to_num(-np.inf)
     if self.max_ndead == np.inf:
         self.max_ndead = -1
     for p in ["nlive", "num_repeats", "nprior", "max_ndead"]:
         setattr(self, p, read_dnumber(getattr(self, p), self.nDims, dtype=int))
     # Fill the automatic ones
     if getattr(self, "feedback", None) is None:
         values = {logging.CRITICAL: 0, logging.ERROR: 0, logging.WARNING: 0,
                   logging.INFO: 1, logging.DEBUG: 2}
         self.feedback = values[self.log.getEffectiveLevel()]
     try:
         output_folder = getattr(self.output, "folder")
         output_prefix = getattr(self.output, "prefix") or ""
         self.read_resume = self.resuming
     except AttributeError:
         # dummy output -- no resume!
         self.read_resume = False
         from tempfile import gettempdir
         output_folder = gettempdir()
         if not get_mpi_rank():
             from random import random
             output_prefix = hex(int(random() * 16 ** 6))[2:]
         else:
             output_prefix = None
         if get_mpi():
             output_prefix = get_mpi_comm().bcast(output_prefix, root=0)
     self.base_dir = os.path.join(output_folder, self.base_dir)
     self.file_root = output_prefix
     if not get_mpi_rank():
         # Creating output folder, if it does not exist (just one process)
         if not os.path.exists(self.base_dir):
             os.makedirs(self.base_dir)
         # Idem, a clusters folder if needed -- notice that PolyChord's default
         # is "True", here "None", hence the funny condition below
         if self.do_clustering is not False:  # None here means "default"
             try:
                 os.makedirs(os.path.join(self.base_dir, clusters))
             except OSError:  # exists!
                 pass
         self.log.info("Storing raw PolyChord output in '%s'.",
                       self.base_dir)
     # Exploiting the speed hierarchy
     speeds, blocks = self.model.likelihood._speeds_of_params(int_speeds=True)
     blocks_flat = list(chain(*blocks))
     self.ordering = [
         blocks_flat.index(p) for p in self.model.parameterization.sampled_params()]
     self.grade_dims = [len(block) for block in blocks]
     #        self.grade_frac = np.array(
     #            [i*j for i,j in zip(self.grade_dims, speeds)])
     #        self.grade_frac = (
     #            self.grade_frac/sum(self.grade_frac))
     # Disabled for now. We need a way to override the "time" part of the meaning of grade_frac
     self.grade_frac = [1 / len(self.grade_dims) for _ in self.grade_dims]
     # Assign settings
     pc_args = ["nlive", "num_repeats", "nprior", "do_clustering",
                "precision_criterion", "max_ndead", "boost_posterior", "feedback",
                "logzero", "update_files", "posteriors", "equals",
                "cluster_posteriors", "write_resume", "read_resume", "write_stats",
                "write_live", "write_dead", "base_dir", "grade_frac", "grade_dims",
                "feedback", "read_resume", "base_dir", "file_root", "grade_frac",
                "grade_dims"]
     self.pc_settings = PolyChordSettings(
         self.nDims, self.nDerived, seed=(self.seed if self.seed is not None else -1),
         **{p: getattr(self, p) for p in pc_args if getattr(self, p) is not None})
     # prior conversion from the hypercube
     bounds = self.model.prior.bounds(
         confidence_for_unbounded=self.confidence_for_unbounded)
     # Check if priors are bounded (nan's to inf)
     inf = np.where(np.isinf(bounds))
     if len(inf[0]):
         params_names = self.model.parameterization.sampled_params()
         params = [params_names[i] for i in sorted(list(set(inf[0])))]
         self.log.error("PolyChord needs bounded priors, but the parameter(s) '"
                        "', '".join(params) + "' is(are) unbounded.")
         raise HandledException
     locs = bounds[:, 0]
     scales = bounds[:, 1] - bounds[:, 0]
     # This function re-scales the parameters AND puts them in the right order
     self.pc_prior = lambda x: (locs + np.array(x)[self.ordering] * scales).tolist()
     # We will need the volume of the prior domain, since PolyChord divides by it
     self.logvolume = np.log(np.prod(scales))
     # Done!
     if not get_mpi_rank():
         self.log.info("Calling PolyChord with arguments:")
         for p, v in inspect.getmembers(self.pc_settings, lambda a: not (callable(a))):
             if not p.startswith("_"):
                 self.log.info("  %s: %s", p, v)
Example #27
    def compute_fit(self):
        self._polychord_output = None
        data = self._observed.spectrum
        datastd = self._observed.errorBar
        sqrtpi = np.sqrt(2 * np.pi)

        ndim = len(self.fitting_parameters)

        def polychord_loglike(cube):
            # log-likelihood function called by polychord
            fit_params_container = np.array(
                [cube[i] for i in range(len(self.fitting_parameters))])
            chi_t = self.chisq_trans(fit_params_container, data, datastd)

            # print('---------START---------')
            # print('chi_t',chi_t)
            # print('LOG',loglike)
            loglike = -np.sum(np.log(datastd * sqrtpi)) - 0.5 * chi_t
            return loglike, [0.0]

        def polychord_uniform_prior(hypercube):
            # Prior transform called by PolyChord: maps the unit hypercube
            # onto the uniform box defined by self.fit_boundaries
            cube = [0.0] * ndim
            for idx, bounds in enumerate(self.fit_boundaries):
                bound_min, bound_max = bounds
                cube[idx] = (hypercube[idx] *
                             (bound_max - bound_min)) + bound_min
            return cube

        settings = PolyChordSettings(ndim, 1)
        settings.nlive = ndim * 25
        settings.do_clustering = self.do_clustering
        settings.num_repeats = ndim
        settings.precision_criterion = self.evidence_tolerance
        settings.logzero = -1e70
        settings.read_resume = self.resume
        settings.base_dir = self.dir_polychord
        settings.file_root = '1-'
        self.warning('Number of dimensions {}'.format(ndim))
        self.warning('Fitting parameters {}'.format(self.fitting_parameters))

        self.info('Beginning fit......')
        pypolychord.run_polychord(polychord_loglike, ndim, 1, settings,
                                  polychord_uniform_prior)
        self._polychord_output = self.store_polychord_solutions()
        print(self._polychord_output)
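
The likelihood above is the standard independent-Gaussian one, log L = -sum_i log(sigma_i * sqrt(2*pi)) - chi^2 / 2. Below is a standalone check with made-up data (all values illustrative), verified against scipy's Gaussian log-pdf:

import numpy as np
from scipy.stats import norm

data = np.array([1.0, 2.0, 3.0])
model = np.array([1.1, 1.9, 3.2])
sigma = np.array([0.1, 0.1, 0.2])

chi2 = np.sum(((data - model) / sigma) ** 2)
loglike = -np.sum(np.log(sigma * np.sqrt(2 * np.pi))) - 0.5 * chi2

# Same number as summing the per-point Gaussian log-pdfs directly
assert np.isclose(loglike, np.sum(norm.logpdf(data, loc=model, scale=sigma)))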
Example #28
def run_iteration(series, prior, log_likelihood):
    # Signature reconstructed from the call in bench() below; the snippet was
    # truncated before the def line. nDims, nDerived, settings and file_root
    # are module-level globals in the original script.
    run_polychord(log_likelihood, nDims, nDerived, settings, prior)
    _samples = NestedSamples(root='chains/' + file_root)
    _samples.rename(columns={0: 'm', 1: 'c'}, inplace=True)
    return _samples


def bench(prior, series):
    start = time()
    _samples = run_iteration(series, prior, log_likelihood)
    stop = time()
    return start, stop, _samples


if __name__ == "__main__":
    settings = PolyChordSettings(2, 0, read_resume=False)
    x_data = numpy.linspace(-3, 3, 250)
    # Alternative: a single (non-mixture) linear model:
    # x_data, y_data = generate_noisy_from_model(lambda x: 1.2 * x + 1.2,
    #                                            x_data, 0, 0.1)
    x_data, y_data = mixture_model(lambda x: 1.1 * x + 1.1,
                                   lambda x: 1.4 * x + 1.1,
                                   x_data, x_noise_amplitude=0,
                                   y_noise_amplitude=0.1)
    log_likelihood = lambda theta: lnz(x_data, y_data, theta)

    linear_start, linear_stop, samples = bench(uniform_prior, 'linear')
    # samples.plot_2d(['m', 'c'])
    # plt.show()

    muc = numpy.linspace(0.1, 2, 10)
    mum = numpy.linspace(0.1, 2, 10)
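
For reference, here is a self-contained sketch of the pattern this example follows: fit a straight line y = m*x + c with pypolychord, then load the chains with anesthetic. The NestedSamples(root=...) constructor is the anesthetic 1.x API (newer releases expose read_chains instead); PolyChord writes its output under settings.base_dir, which defaults to 'chains'. The noise level and prior bounds are illustrative:

import numpy
import pypolychord
from pypolychord.settings import PolyChordSettings
from anesthetic import NestedSamples

x_data = numpy.linspace(-3, 3, 50)
y_data = 1.2 * x_data + 0.5 + numpy.random.normal(0, 0.1, x_data.size)

def log_likelihood(theta):
    m, c = theta
    residuals = y_data - (m * x_data + c)
    # Unnormalised Gaussian log-likelihood with sigma = 0.1
    return float(-0.5 * numpy.sum((residuals / 0.1) ** 2)), []

def uniform_prior(hypercube):
    # Map the unit square onto the box [-5, 5]^2
    return (10.0 * numpy.asarray(hypercube) - 5.0).tolist()

settings = PolyChordSettings(2, 0, read_resume=False, file_root='line')
pypolychord.run_polychord(log_likelihood, 2, 0, settings, uniform_prior)
samples = NestedSamples(root='chains/line')
samples.rename(columns={0: 'm', 1: 'c'}, inplace=True)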
Example #29
 def initialize(self):
     """Imports the PolyChord sampler and prepares its arguments."""
     # Allow global import if no direct path specification
     allow_global = not self.path
     if not self.path and self.packages_path:
         self.path = self.get_path(self.packages_path)
     self.pc = self.is_installed(path=self.path, allow_global=allow_global)
     if not self.pc:
         raise NotInstalledError(
             self.log,
             "Could not find PolyChord. Check error message above. "
             "To install it, run 'cobaya-install polychord --%s "
             "[packages_path]'", _packages_path_arg)
     # Prepare arguments and settings
     from pypolychord.settings import PolyChordSettings
     self.n_sampled = len(self.model.parameterization.sampled_params())
     self.n_derived = len(self.model.parameterization.derived_params())
     self.n_priors = len(self.model.prior)
     self.n_likes = len(self.model.likelihood)
     self.nDims = self.model.prior.d()
     self.nDerived = (self.n_derived + self.n_priors + self.n_likes)
     if self.logzero is None:
         self.logzero = np.nan_to_num(-np.inf)
     if self.max_ndead == np.inf:
         self.max_ndead = -1
     self._quants_d_units = ["nlive", "max_ndead"]
     for p in self._quants_d_units:
         if getattr(self, p) is not None:
             setattr(
                 self, p,
                 NumberWithUnits(getattr(self, p),
                                 "d",
                                 scale=self.nDims,
                                 dtype=int).value)
     self._quants_nlive_units = ["nprior"]
     for p in self._quants_nlive_units:
         if getattr(self, p) is not None:
             setattr(
                 self, p,
                 NumberWithUnits(getattr(self, p),
                                 "nlive",
                                 scale=self.nlive,
                                 dtype=int).value)
     # Fill the automatic ones
     if getattr(self, "feedback", None) is None:
         values = {
             logging.CRITICAL: 0,
             logging.ERROR: 0,
             logging.WARNING: 0,
             logging.INFO: 1,
             logging.DEBUG: 2
         }
         self.feedback = values[self.log.getEffectiveLevel()]
     # Prepare output folders and prefixes
     if self.output:
         self.file_root = self.output.prefix
         self.read_resume = self.output.is_resuming()
     else:
         output_prefix = share_mpi(
             hex(int(random() * 16**6))[2:] if is_main_process() else None)
         self.file_root = output_prefix
         # dummy output -- no resume!
         self.read_resume = False
     self.base_dir = self.get_base_dir(self.output)
     self.raw_clusters_dir = os.path.join(self.base_dir, self._clusters_dir)
     self.output.create_folder(self.base_dir)
     if self.do_clustering:
         self.clusters_folder = self.get_clusters_dir(self.output)
         self.output.create_folder(self.clusters_folder)
     self.mpi_info("Storing raw PolyChord output in '%s'.", self.base_dir)
     # Exploiting the speed hierarchy
     if self.blocking:
         blocks, oversampling_factors = self.model.check_blocking(
             self.blocking)
     else:
         if self.measure_speeds:
             self.model.measure_and_set_speeds(n=self.measure_speeds)
         blocks, oversampling_factors = self.model.get_param_blocking_for_sampler(
             oversample_power=self.oversample_power)
     self.mpi_info("Parameter blocks and their oversampling factors:")
     max_width = len(str(max(oversampling_factors)))
     for f, b in zip(oversampling_factors, blocks):
         self.mpi_info("* %" + "%d" % max_width + "d : %r", f, b)
     # Save blocking in updated info, in case we want to resume
     self._updated_info["blocking"] = list(zip(oversampling_factors,
                                               blocks))
     blocks_flat = list(chain(*blocks))
     self.ordering = [
         blocks_flat.index(p)
         for p in self.model.parameterization.sampled_params()
     ]
     self.grade_dims = [len(block) for block in blocks]
     # Steps per block
     # NB: num_repeats is ignored by PolyChord when int "grade_frac" given,
     # so needs to be applied by hand.
     # In num_repeats, `d` is interpreted as dimension of each block
     self.grade_frac = [
         int(o * read_dnumber(self.num_repeats, dim_block))
         for o, dim_block in zip(oversampling_factors, self.grade_dims)
     ]
     # Assign settings
      pc_args = [
          "nlive", "num_repeats", "nprior", "do_clustering",
          "precision_criterion", "max_ndead", "boost_posterior", "feedback",
          "logzero", "posteriors", "equals", "compression_factor",
          "cluster_posteriors", "write_resume", "read_resume", "write_stats",
          "write_live", "write_dead", "base_dir", "file_root", "grade_frac",
          "grade_dims"
      ]
      # As stated above, num_repeats is ignored, so let's not pass it
      pc_args.remove("num_repeats")
     self.pc_settings = PolyChordSettings(
         self.nDims,
         self.nDerived,
         seed=(self.seed if self.seed is not None else -1),
         **{
             p: getattr(self, p)
             for p in pc_args if getattr(self, p) is not None
         })
     # prior conversion from the hypercube
     bounds = self.model.prior.bounds(
         confidence_for_unbounded=self.confidence_for_unbounded)
     # Check if priors are bounded (nan's to inf)
     inf = np.where(np.isinf(bounds))
     if len(inf[0]):
         params_names = self.model.parameterization.sampled_params()
         params = [params_names[i] for i in sorted(list(set(inf[0])))]
          raise LoggedError(
              self.log,
              "PolyChord needs bounded priors, but the parameter(s) '" +
              "', '".join(params) + "' is(are) unbounded.")
     locs = bounds[:, 0]
     scales = bounds[:, 1] - bounds[:, 0]
     # This function re-scales the parameters AND puts them in the right order
     self.pc_prior = lambda x: (locs + np.array(x)[self.ordering] * scales
                                ).tolist()
     # We will need the volume of the prior domain, since PolyChord divides by it
     self.logvolume = np.log(np.prod(scales))
     # Prepare callback function
     if self.callback_function is not None:
         self.callback_function_callable = (get_external_function(
             self.callback_function))
     self.last_point_callback = 0
     # Prepare runtime live and dead points collections
     self.live = Collection(self.model,
                            None,
                            name="live",
                            initial_size=self.pc_settings.nlive)
     self.dead = Collection(self.model, self.output, name="dead")
     # Done!
     if is_main_process():
         self.log.debug("Calling PolyChord with arguments:")
         for p, v in inspect.getmembers(self.pc_settings,
                                        lambda a: not (callable(a))):
             if not p.startswith("_"):
                 self.log.debug("  %s: %s", p, v)
     self.mpi_info("Initialized!")


def prior(hypercube):
    """ Priors for each parameter. """
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        theta[i] = priordict[parnames[i]].ppf(x)

    return theta
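
This prior() is the generic inverse-CDF transform: each U(0, 1) coordinate is pushed through the percent-point function (ppf) of the corresponding prior. Below is a minimal sketch of the same pattern with scipy.stats; the parameter names and distributions are illustrative, and the function is named prior_transform to avoid shadowing prior() above:

from scipy import stats

demo_parnames = ['period', 'amplitude']
demo_priordict = {
    'period': stats.uniform(loc=1.0, scale=99.0),  # U(1, 100)
    'amplitude': stats.norm(loc=0.0, scale=5.0),   # N(0, 5)
}

def prior_transform(hypercube):
    # ppf maps each unit-interval draw onto its prior distribution
    return [demo_priordict[name].ppf(x)
            for name, x in zip(demo_parnames, hypercube)]

print(prior_transform([0.5, 0.5]))  # medians: [50.5, 0.0]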


# Define PolyChord settings
settings = PolyChordSettings(
    nDims,
    nDerived,
)
settings.do_clustering = args_params.noclust
settings.nlive = nDims * args_params.nlive
settings.base_dir = base_dir
settings.file_root = 'hd40307_k{}'.format(nplanets)  # modelpath[12:-3]
settings.num_repeats = nDims * args_params.nrep
settings.precision_criterion = args_params.prec
settings.read_resume = False

# Change settings if resume is true
if args_params.resume:
    settings.read_resume = args_params.resume
    settings.base_dir = dirname + prev_run

# Run PolyChord