Example #1
    def __call__(self, settings_dict, comm=None):
        """
        Runs pypolychord with specified inputs and writes output files. See the
        pypolychord documentation for more details.

        Parameters
        ----------
        settings_dict: dict
            Input PolyChord settings.
        comm: None or mpi4py MPI communicator, optional
            For MPI parallelisation.
        """
        if comm is None:
            settings = pypolychord_settings.PolyChordSettings(
                self.ndim, self.nderived, **settings_dict)
        else:
            rank = comm.Get_rank()
            if rank == 0:
                settings = pypolychord_settings.PolyChordSettings(
                    self.ndim, self.nderived, **settings_dict)
            else:
                settings = None
            settings = comm.bcast(settings, root=0)
        pypolychord.run_polychord(self.likelihood,
                                  self.ndim,
                                  self.nderived,
                                  settings,
                                  prior=self.prior)
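A minimal usage sketch of the rank-0 construct-and-broadcast pattern above (names such as mpi_demo are illustrative): building the settings once on rank 0 and broadcasting them guarantees every MPI process runs with an identical configuration.

from mpi4py import MPI
from pypolychord.settings import PolyChordSettings

comm = MPI.COMM_WORLD
ndim, nderived = 4, 0
if comm.Get_rank() == 0:
    # construct on rank 0 only, exactly as in the example above
    settings = PolyChordSettings(ndim, nderived, nlive=100, file_root='mpi_demo')
else:
    settings = None
settings = comm.bcast(settings, root=0)  # now identical on every rank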
Example #2
    def run_sampler(self):
        import pypolychord
        from pypolychord.settings import PolyChordSettings
        if self.kwargs['use_polychord_defaults']:
            settings = PolyChordSettings(nDims=self.ndim,
                                         nDerived=self.ndim,
                                         base_dir=self.outdir,
                                         file_root=self.label)
        else:
            self._setup_dynamic_defaults()
            pc_kwargs = self.kwargs.copy()
            pc_kwargs['base_dir'] = self.outdir
            pc_kwargs['file_root'] = self.label
            pc_kwargs.pop('use_polychord_defaults')
            settings = PolyChordSettings(nDims=self.ndim,
                                         nDerived=self.ndim,
                                         **pc_kwargs)
        self._verify_kwargs_against_default_kwargs()

        pypolychord.run_polychord(loglikelihood=self.log_likelihood,
                                  nDims=self.ndim,
                                  nDerived=self.ndim,
                                  settings=settings,
                                  prior=self.prior_transform)

        self.result.log_evidence, self.result.log_evidence_err = \
            self._read_out_stats_file()
        self.result.samples = self._read_sample_file()
        return self.result
Example #3
    def compute_fit(self):
        self._polychord_output = None
        data = self._observed.spectrum
        datastd = self._observed.errorBar
        sqrt2pi = np.sqrt(2 * np.pi)

        ndim = len(self.fitting_parameters)

        def polychord_loglike(cube):
            # log-likelihood function called by polychord
            fit_params_container = np.array(
                [cube[i] for i in range(len(self.fitting_parameters))])
            chi_t = self.chisq_trans(fit_params_container, data, datastd)

            loglike = -np.sum(np.log(datastd * sqrt2pi)) - 0.5 * chi_t
            return loglike, [0.0]

        def polychord_uniform_prior(hypercube):
            # Prior transform called by PolyChord: maps points from the unit
            # hypercube to a uniform prior within the fit boundaries.
            cube = [0.0] * ndim

            for idx, bounds in enumerate(self.fit_boundaries):
                bound_min, bound_max = bounds
                cube[idx] = (hypercube[idx] *
                             (bound_max - bound_min)) + bound_min
            return cube


        settings = PolyChordSettings(ndim, 1)
        settings.nlive = ndim * 25
        settings.num_repeats = ndim * 5
        settings.do_clustering = self.do_clustering
        settings.precision_criterion = self.evidence_tolerance
        settings.logzero = -1e70
        settings.read_resume = self.resume
        settings.base_dir = self.dir_polychord
        settings.file_root = '1-'
        self.warning('Number of dimensions {}'.format(ndim))
        self.warning('Fitting parameters {}'.format(self.fitting_parameters))

        self.info('Beginning fit...')
        pypolychord.run_polychord(polychord_loglike, ndim, 1, settings,
                                  polychord_uniform_prior)
        self._polychord_output = self.store_polychord_solutions()
        print(self._polychord_output)
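The likelihood above is the standard Gaussian form logL = -sum(log(sigma*sqrt(2*pi))) - chi2/2. A quick numerical check of that identity against scipy.stats.norm, with made-up toy data:

import numpy as np
from scipy.stats import norm

data = np.array([1.0, 2.0, 3.0])    # toy observations
model = np.array([1.1, 1.9, 3.2])   # toy model prediction
sigma = np.array([0.1, 0.2, 0.15])  # toy error bars

chi2 = np.sum(((data - model) / sigma) ** 2)
loglike = -np.sum(np.log(sigma * np.sqrt(2 * np.pi))) - 0.5 * chi2

# matches the sum of independent Gaussian log-pdfs
assert np.isclose(loglike, norm.logpdf(data, loc=model, scale=sigma).sum())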
Example #4
def run_iteration(file_root, prior, log_likelihood, nDims=2, nDerived=0):
    global settings
    settings.file_root = file_root
    settings.nDims = nDims
    settings.nDerived = nDerived
    settings.feedback = 0
    run_polychord(log_likelihood, nDims, nDerived, settings, prior)
    _samples = NestedSamples(root='chains/' + file_root)
    _samples.rename(columns={0: 'm', 1: 'c'}, inplace=True)
    return _samples
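A hedged usage sketch for run_iteration, assuming it lives in the same module as a module-level settings object (as the global statement requires); the straight-line likelihood and prior below are illustrative only:

import numpy as np
from pypolychord import run_polychord
from pypolychord.settings import PolyChordSettings
from pypolychord.priors import UniformPrior
from anesthetic import NestedSamples

settings = PolyChordSettings(2, 0)  # module-level, as run_iteration expects

def prior(cube):
    return UniformPrior(-5, 5)(cube)

def log_likelihood(theta):
    m, c = theta
    x = np.linspace(0, 1, 10)
    y = 2 * x + 1  # noiseless mock data for the line y = m*x + c
    return float(-0.5 * np.sum((y - (m * x + c)) ** 2)), []

samples = run_iteration('line_fit', prior, log_likelihood)  # columns 'm', 'c'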
Example #5
 def run(self, verbose=False):
     """Initiate the PolyChord Nested Sampling run."""
     output = pypolychord.run_polychord(self._likelihood, self._nDims,
                                        self._nDerived, self._settings,
                                        self._prior, self._dumper)
     self._output = output
     return output.logZ, output.logZerr
Example #6
def exec_polychord(root_name: str,
                   m,
                   s,
                   likelihood,
                   renew_plots=True,
                   prior=simple_prior,
                   nLive=200,
                   fig=None,
                   ax=None):
    nDerived = 0

    nDims = m.size

    settings = pypolychord.settings.PolyChordSettings(nDims, nDerived)
    settings.file_root = root_name
    settings.nlive = nLive
    settings.do_clustering = True
    settings.read_resume = False
    settings.feedback = 0
    # likelihood = lambda x: gaussian_likelihood(x, m, s)
    output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings,
                                       prior)
    samples = anesthetic.NestedSamples(root='./chains/' + settings.file_root)
    # if fig is None and ax is None:
    if fig is None or renew_plots:
        fig, ax = samples.plot_2d([0, 1])
    else:
        samples.plot_2d(ax)
Example #7
def find_samples(root_name: str,
                 m,
                 s,
                 likelihood,
                 renew_plots=True,
                 prior=simple_prior,
                 nLive=200,
                 fig=None,
                 ax=None):
    try:
        return anesthetic.NestedSamples(root='./chains/' + root_name)
    except FileNotFoundError:
        nDerived = 0

        nDims = m.size

        settings = pypolychord.settings.PolyChordSettings(nDims, nDerived)
        settings.file_root = root_name
        settings.nlive = nLive
        settings.do_clustering = True
        settings.read_resume = False
        settings.feedback = 0
        # likelihood = lambda x: gaussian_likelihood(x, m, s)
        output = pypolychord.run_polychord(likelihood, nDims, nDerived,
                                           settings, prior)
        return anesthetic.NestedSamples(root='./chains/' + settings.file_root)
Example #8
 def run_sampler(self):
     import pypolychord
     from pypolychord.settings import PolyChordSettings
     if self.kwargs['use_polychord_defaults']:
         settings = PolyChordSettings(nDims=self.ndim,
                                      nDerived=self.ndim,
                                      base_dir=self._sample_file_directory,
                                      file_root=self.label)
     else:
         self._setup_dynamic_defaults()
         pc_kwargs = self.kwargs.copy()
         pc_kwargs['base_dir'] = self._sample_file_directory
         pc_kwargs['file_root'] = self.label
         pc_kwargs.pop('use_polychord_defaults')
         settings = PolyChordSettings(nDims=self.ndim,
                                      nDerived=self.ndim,
                                      **pc_kwargs)
     self._verify_kwargs_against_default_kwargs()
     out = pypolychord.run_polychord(loglikelihood=self.log_likelihood,
                                     nDims=self.ndim,
                                     nDerived=self.ndim,
                                     settings=settings,
                                     prior=self.prior_transform)
     self.result.log_evidence = out.logZ
     self.result.log_evidence_err = out.logZerr
     log_likelihoods, physical_parameters = self._read_sample_file()
     self.result.log_likelihood_evaluations = log_likelihoods
     self.result.samples = physical_parameters
     self.calc_likelihood_count()
     return self.result
Example #9
def exec_polychord(series_name, settings, loglike, prior):
    print('running {}'.format(series_name))
    settings.file_root = series_name
    output = pypolychord.run_polychord(loglike, settings.nDims,
                                       settings.nDerived, settings, prior)
    sample = NestedSamples(root='./chains/{}'.format(settings.file_root))
    outputs[series_name] = output
    samples[series_name] = sample
    return output, sample
Example #10
    def run(self):
        """
        Executes the inference
        """
        if self.prepare():
            # Setup the inference
            ndim = np.sum(self.pstep > 0)
            settings = PolyChordSettings(ndim, 0)
            settings.base_dir = self.outputdir
            settings.file_root = self.fprefix
            settings.nlive = self.nlive
            settings.read_resume = self.resume
            if self.nrepeat is not None:
                settings.num_repeats = self.nrepeat
            settings.precision_criterion = self.dlogz
            settings.grade_dims = [int(ndim)]
            settings.feedback = self.verb
            # Run it
            if self.dumper is not None:
                out = pypolychord.run_polychord(self.loglike, ndim, 0,
                                                settings, self.prior,
                                                self.dumper)
            else:
                out = pypolychord.run_polychord(self.loglike, ndim, 0,
                                                settings, self.prior)

            outp = np.loadtxt(os.path.join(self.outputdir, self.fprefix) +
                              '_equal_weights.txt')
            self.outp = outp[:, 2:].T
            ibest = np.argmin(outp[:, 1])
            self.bestp = self.outp[:, ibest]
            # Save posterior and bestfit params
            if self.fsavefile is not None:
                np.save(self.fsavefile, self.outp)
            if self.fbestp is not None:
                np.save(self.fbestp, self.bestp)
            return self.outp, self.bestp
        else:
            if self.verb:
                print("Sampler is not fully prepared to run. " + \
                      "Correct the above errors and try again.")
Example #11
 def nested_sample(self, **kwargs):
     self.test_log_like()
     self.test_quantile()
     _settings = self.setup_settings(**kwargs)
     output = run_polychord(self.log_likelihood, self.dimensionality, self.num_derived, _settings, self.quantile)
     try:
         samples = NestedSamples(
             root=f'./chains/{_settings.file_root}')
     except ValueError as e:
         print(e)
         samples = None
     return output, samples
Example #12
    def run(self):
        """Run Polychord. We need to pass three functions:

        log_lik: takes a list of parameter values and
            returns tuple: (log_lik, list of derived)

        prior: takes a unit hypercube and converts it to the
            physical parameters

        dumper: Optional function if we want to get some output while
            the chain is running. For now it's empty
        """
        # Write parameter names
        self.write_parnames()

        def log_lik(theta):
            """ Wrapper for likelihood. No derived for now """
            params = {}
            for i, name in enumerate(self.names):
                params[name] = theta[i]

            log_lik = self.log_lik(params)
            return log_lik, []

        def prior(hypercube):
            """ Uniform prior """
            prior = []
            for i, limits in enumerate(self.limits.values()):
                prior.append(UniformPrior(limits[0], limits[1])(hypercube[i]))
            return prior

        def dumper(live, dead, logweights, logZ, logZ_err):
            """ Dumper empty for now"""
            pass

        pypolychord.run_polychord(log_lik, self.num_params, self.num_derived,
                                  self.settings, prior, dumper)
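The docstring above describes the three callables PolyChord needs. A minimal self-contained version of that pattern on a toy 2-D Gaussian (all names here are illustrative):

import numpy as np
import pypolychord
from pypolychord.settings import PolyChordSettings
from pypolychord.priors import UniformPrior

def log_lik(theta):
    return float(-0.5 * np.sum(theta ** 2)), []  # (log-likelihood, derived)

def prior(hypercube):
    return UniformPrior(-3, 3)(hypercube)  # unit hypercube -> physical space

def dumper(live, dead, logweights, logZ, logZ_err):
    pass  # no-op progress hook

settings = PolyChordSettings(2, 0, file_root='pattern_demo', read_resume=False)
pypolychord.run_polychord(log_lik, 2, 0, settings, prior, dumper)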
Example #13
    def polychord_sampler(self):
        import pypolychord
        from pypolychord.settings import PolyChordSettings
        from pypolychord.priors import UniformPrior, GaussianPrior

        ndim = len(self.params.p0)
        nder = 0

        # Log-likelihood compliant with PolyChord's input
        def likelihood(theta):
            return self.lnlike(theta), [0]

        def prior(hypercube):
            prior = []
            for h, pr in zip(hypercube, self.params.p_free_priors):
                if pr[1] == 'Gaussian':
                    prior.append(
                        GaussianPrior(float(pr[2][0]), float(pr[2][1]))(h))
                else:
                    prior.append(
                        UniformPrior(float(pr[2][0]), float(pr[2][2]))(h))
            return prior

        # Optional dumper function giving run-time read access to
        # the live points, dead points, weights and evidences
        def dumper(live, dead, logweights, logZ, logZerr):
            print("Last dead point:", dead[-1])

        settings = PolyChordSettings(ndim, nder)
        settings.base_dir = self.get_output(
            'param_chains')[:-4]  # Remove ".npz"
        settings.file_root = 'pch'
        settings.nlive = self.config['nlive']
        settings.num_repeats = self.config['nrepeat']
        settings.do_clustering = False  # Assume unimodal posterior
        settings.boost_posterior = 10  # Increase number of posterior samples
        settings.nprior = 200  # Draw nprior initial prior samples
        settings.maximise = True  # Maximize posterior at the end
        settings.read_resume = False  # Read from resume file of earlier run
        settings.feedback = 2  # Verbosity {0,1,2,3}

        output = pypolychord.run_polychord(likelihood, ndim, nder, settings,
                                           prior, dumper)

        return output
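The per-parameter prior above mixes GaussianPrior and UniformPrior transforms. The same idea outside the class, with invented parameters:

from pypolychord.priors import UniformPrior, GaussianPrior

def mixed_prior(hypercube):
    transforms = [GaussianPrior(0.0, 1.0),   # parameter 0: N(0, 1)
                  UniformPrior(-2.0, 2.0)]   # parameter 1: U(-2, 2)
    return [t(h) for t, h in zip(transforms, hypercube)]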
Example #14
    def run_polychord(num_live_points):
        try:
            import pypolychord
            from pypolychord.settings import PolyChordSettings
            from pypolychord.priors import UniformPrior
        except ImportError:
            raise ImportError("Polychord not installed.\nRun `git clone https://github.com/PolyChord/PolyChordLite.git \ncd PolyChordLite\npython setup.py install`.")

        def likelihood(theta):
            """ Simple Gaussian Likelihood"""
            nDims = len(theta)
            r2 = sum(theta**2)
            logL = -log(2*pi*sigma*sigma)*nDims/2.0
            logL += -r2/2/sigma/sigma
            return logL, [r2]


        def prior(hypercube):
            """ Uniform prior from [-1,1]^D. """
            return UniformPrior(-1, 1)(hypercube)


        def dumper(live, dead, logweights, logZ, logZerr):
            return
            # print("Last dead point:", dead[-1])

        settings = PolyChordSettings(ndims, 1)
        settings.file_root = 'gaussian'
        settings.nlive = num_live_points
        settings.do_clustering = True
        settings.read_resume = False

        t0 = default_timer()
        output = pypolychord.run_polychord(likelihood, ndims, 1, settings, prior, dumper)
        run_time = default_timer() - t0
        print("polychord log(Z):", output.logZ)
        return run_time
Example #15
# from_ppr = bench(run_polychord, likelihood, d, 0,
#                  options, PRprior, repeats=20)
# for nlive=50
# [2.5492191314697266, 2.678072214126587, 2.2829549312591553, 2.3218448162078857, 2.4961631298065186, 2.916872978210449, 2.3596138954162598, 2.6929938793182373, 2.2408578395843506, 2.8879880905151367, 2.408330202102661, 2.240095853805542, 2.662424087524414, 2.345767021179199, 2.3510117530822754, 2.2823920249938965, 2.6985690593719482, 2.3489580154418945, 2.3447391986846924, 2.4235379695892334]

# for nlive=512
# [22.410197973251343, 22.148893356323242, 22.428645133972168, 22.378524780273438, 22.478039979934692, 22.671180963516235, 22.617368936538696, 22.897464990615845, 22.708940029144287, 22.631688833236694, 22.425545930862427, 22.523781776428223, 22.688953161239624, 22.233675956726074, 22.2305850982666, 22.432966947555542, 22.33135986328125, 22.58033013343811, 22.07505202293396, 21.925750732421875]

# from_mixture = bench(run_polychord, likelihood, d, 0,
#                      options, mixture_model_prior, repeats=20)
# for nlive=50
# [2.2318079471588135, 2.2587890625, 2.2009570598602295, 2.0472140312194824, 2.491523027420044, 2.203134059906006, 2.812570095062256, 2.377692937850952, 2.267406702041626, 2.1818931102752686, 2.3163158893585205, 2.321192979812622, 2.432847738265991, 2.2643849849700928, 2.2828149795532227, 2.400684118270874, 2.709566831588745, 2.191816806793213, 2.3305797576904297, 2.2695229053497314]

# for nlive=512
# [21.301582098007202, 21.930859804153442, 22.009080171585083, 21.122539043426514, 21.079975128173828, 21.6564359664917, 22.570042848587036, 21.710810899734497, 21.9978129863739, 21.61481499671936, 21.323888301849365, 21.60486602783203, 21.93395495414734, 21.48937678337097, 22.27761721611023, 21.79326891899109, 22.073890924453735, 21.426614999771118, 22.318853855133057, 21.810243844985962]

options.file_root = 'gaussian'
run_polychord(likelihood, d, 0, options, gaussian_prior)

# run_polychord(likelihood, d, 0, options, offset_gaussian_prior)  # !!
# run_polychord(likelihood, d, 0, options, rev_offset_gaussian_prior)  # !!
# options.file_root = 'mixture'
# options.precision_criterion = 1e-2
# run_polychord(likelihood, d, 0, options, mixture_model_prior)
# options.file_root = 'PPR'
# run_polychord(likelihood, d, 0, options, PRprior)

# run_polychord(likelihood, d, 0, options, bloated_gaussian_prior)
# options.file_root = 'uniform'
# run_polychord(likelihood, d, 0, options, uniform_prior)
Example #16
    def run(self):
        '''
        Run Polychord

        We need to pass 3 functions:
        log_lik - compute likelihood for a parameter set theta
        prior - defines the box prior
        dumper - extracts info during runtime - empty for now
        '''
        par_names = {name: name for d in self.data for name in d.pars_init}
        val_dict = {
            name: val
            for d in self.data for name, val in d.pars_init.items()
        }
        lim_dict = {
            name: lim
            for d in self.data for name, lim in d.par_limit.items()
        }
        fix_dict = {
            name: fix
            for d in self.data for name, fix in d.par_fixed.items()
        }

        # Select the parameters we sample
        sampled_pars_ind = np.array(
            [i for i, val in enumerate(fix_dict.values()) if not val])
        npar = len(sampled_pars_ind)
        nder = 0

        # Get the limits for the free params
        limits = np.array(
            [list(lim_dict.values())[i] for i in sampled_pars_ind])
        names = np.array(
            [list(par_names.values())[i] for i in sampled_pars_ind])

        def log_lik(theta):
            ''' Wrapper for likelihood function passed to Polychord '''
            pars = val_dict.copy()
            for i, name in enumerate(names):
                pars[name] = theta[i]

            log_lik = self.log_lik(pars)
            return log_lik, []

        def prior(hypercube):
            ''' Uniform prior '''
            prior = []
            for i, lims in enumerate(limits):
                prior.append(UniformPrior(lims[0], lims[1])(hypercube[i]))
            return prior

        def dumper(live, dead, logweights, logZ, logZerr):
            ''' Dumper function empty for now '''
            pass

        # Get the settings we need and add defaults
        # These are the same as PolyChord recommends
        nlive = self.polychord_setup.getint('nlive', int(25 * npar))
        seed = self.polychord_setup.getint('seed', int(0))
        num_repeats = self.polychord_setup.getint('num_repeats', int(5 * npar))
        precision = self.polychord_setup.getfloat('precision', float(0.001))
        boost_posterior = self.polychord_setup.getfloat(
            'boost_posterior', float(0.0))
        resume = self.polychord_setup.getboolean('resume', True)
        cluster_posteriors = self.polychord_setup.getboolean(
            'cluster_posteriors', False)
        do_clustering = self.polychord_setup.getboolean('do_clustering', False)
        path = self.polychord_setup.get('path')
        filename = self.polychord_setup.get('name')
        write_live = self.polychord_setup.getboolean('write_live', False)
        write_dead = self.polychord_setup.getboolean('write_dead', True)
        write_prior = self.polychord_setup.getboolean('write_prior', False)

        # Initialize and run PolyChord
        settings = PolyChordSettings(npar,
                                     nder,
                                     base_dir=path,
                                     file_root=filename,
                                     seed=seed,
                                     nlive=nlive,
                                     precision_criterion=precision,
                                     num_repeats=num_repeats,
                                     boost_posterior=boost_posterior,
                                     cluster_posteriors=cluster_posteriors,
                                     do_clustering=do_clustering,
                                     equals=False,
                                     write_resume=resume,
                                     read_resume=resume,
                                     write_live=write_live,
                                     write_dead=write_dead,
                                     write_prior=write_prior)
        pypolychord.run_polychord(log_lik, npar, nder, settings, prior, dumper)
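The polychord_setup object above exposes configparser-style getters (getint, getfloat, getboolean, get), so it presumably is, or behaves like, a configparser section. A sketch of wiring one up, with made-up section and option values:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[polychord]
nlive = 256
num_repeats = 40
path = ./chains
name = demo_run
""")
polychord_setup = config['polychord']
nlive = polychord_setup.getint('nlive', 100)  # falls back to 100 if absent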
Example #17
 def run_pc(self, nlive=20):
     run_polychord(lambda x: self.logL(x), self.nDims, 0, self.settings,
                   lambda x: self.prior(x))
Example #18
def run(model, rundict, priordict, polysettings=None):
    """ 
    Runs PolyChord on the chosen data and model. When PolyChord is finished
    running it automatically runs a post processing script on the output creating
    plots of the posterior and saving basic information like the evidence and
    run settings on a text file for future reference.

    Parameters
    ----------
    model : object
        Custom model class that contains your desired model. This class has to
        have a method called log_likelihood(x) which takes a parameter array x
        and returns the corresponding log Likelihood. The order of the array x is
        given by the order that results from calling list(priordict.keys()) (see
        description for priordict for more information.)
        Your custom model class should inherit from either RVModel if it's a 
        radial velocities model or from BaseModel otherwise.
    rundict : dict
        Dictionary with basic information about the run itself. Keys it should 
        include are:
            target : Name of target or function to analyse
            runid : String to identify the specific model or configuration being
                    used
            comment (optional) : Optional comment for a third layer of identification
            prior_names (optional) : List with the names and ranges of the priors
            nplanets (optional) : Number of planets in the RV model. If not 
                                  provided, it will use the number of planets 
                                  present in the configfile.
            star_params (optional) : Dictionary with the stars parameters like
                                     star_mass (mass of the star in solar masses),
                                     star_radius (radius of star in solar radii),
                                     star_rot (rotation period of star). These
                                     can be ufloats to include the uncertainty.
            save_dir (optional) : Save directory for the output of PolyChord
            order_planets (optional) : Boolean indicating if the planets should
                                       be ordered by period. This is to avoid
                                       periods jumping between the different 
                                       planets. Default is False.
    priordict : dict
        Dictionary with the priors to all free parameters. Keys are the names
        of the parameters. Values are objects with a method .ppf(x) which is the
        inverse of the CDF of the chosen probability distribution of the prior.
        It takes a uniformly sampled number between 0 and 1 and returns the 
        physical parameter distributed according to the prior. 
        The method log_likelihood in your custom model should take the same order
        of parameters that results from calling list(priordict.keys()).
    polysettings : dict, optional
        Dictionary containing custom parameters for PolyChord setting like nlive
        or nrepeats. If None is given, the default PolyChord settings are used.

    Returns
    -------
    output : PolyChordOutput object
        Object with the PolyChord output. 
        Several attributes are added before returning. These are used for the 
        post processing script.
    """


    # Create list of parameter names
    parnames = model.parnames
    rundict_keys = list(rundict.keys())

    # Count "real" number of planets if not provided by rundict or model
    nplanets = 0
    if 'nplanets' in rundict_keys:
        # Check if nplanets is in rundict
        nplanets = rundict['nplanets']
    elif hasattr(model, 'nplanets'):
        # Check if nplanets is in the model
        nplanets = model.nplanets
    else:
        # Count number of planets by checking for periods
        for i, par in enumerate(parnames):
            if ('planet' in par) and ('period' in par):
                nplanets += 1

    planets = []
    planet_idxs = []
    for n in range(1, nplanets+1):
        planets.append([])
        for i, par in enumerate(parnames):
            if f'planet{n}' in par:
                planets[n-1].append(i)
                if 'period' in par:
                    planet_idxs.append(i)

    if rank == 0:
        if nplanets == 1:
            print(f'\nRunning {rundict["target"]} with {nplanets} planet\n')
        else:
            print(f'\nRunning {rundict["target"]} with {nplanets} planets\n')

    # Prepare run
    nderived = 0
    ndim = len(parnames)

    # Function to convert from hypercube to physical parameter space
    def prior(hypercube):
        """
        Converts a point in the unit hypercube to the physical parameters using
        their respective priors.
        """

        # Calculate physical parameters with ppf from prior
        theta = np.ones_like(hypercube)
        sorteduni_params_idxs = []
        sortedloguni_params_idxs = []
        for i in range(ndim):
            param = parnames[i]
            # Check for instances of SortedUniformPrior or LogSortedUniformPrior
            # In that case for now skip the priortransform calculation and keep track
            # of the parameters with the same sorted prior.
            if isinstance(priordict[param], priors.SortedUniformPrior):
                sorteduni_params_idxs.append(i)
                prior_sortuni = priordict[param]
            elif isinstance(priordict[param], priors.LogSortedUniformPrior):
                sortedloguni_params_idxs.append(i)
                prior_sortloguni = priordict[param]
            else:
                theta[i] = priordict[param].ppf(hypercube[i])

        # Then calculate the sorted prior for those parameters, remembering where they were
        # in the array to insert the results back in the same order.
        if len(sorteduni_params_idxs) > 0:
            theta[sorteduni_params_idxs] = prior_sortuni(hypercube[sorteduni_params_idxs])

        if len(sortedloguni_params_idxs) > 0:
            theta[sortedloguni_params_idxs] = prior_sortloguni(hypercube[sortedloguni_params_idxs])

        return theta

    # LogLikelihood

    def loglike(x):
        """
        Calculates the logarithm of the likelihood given the parameter vector x.
        """

        return (model.log_likelihood(x), [])

    # Starting time to identify this specific run
    # If it's being run with more than one core the isodate on the first core
    # is broadcast to the rest so they all share the same variable
    isodate = datetime.datetime.today().isoformat()
    if size > 1:
        isodate = comm.bcast(isodate, root=0)

    # Create PolyChordSettings object for this run
    settings = set_polysettings(
        rundict, polysettings, ndim, nderived, isodate, parnames)

    print(f'Saving results to {os.path.join(rundict["save_dir"], settings.file_root)}\n')

    # Initialise clocks
    ti = time.process_time()

    # ----- Run PolyChord ------
    output = run_polychord(loglike, ndim, nderived, settings, prior)
    # --------------------------

    # Stop clocks
    tf = time.process_time()

    if size > 1:
        # Reduce clocks to min and max to get actual wall time
        ti = comm.reduce(ti, op=MPI.MIN, root=0)
        tf = comm.reduce(tf, op=MPI.MAX, root=0)

    # Save results
    if rank == 0:
        # Cleanup of parameter names
        paramnames = [(x, x) for x in parnames]
        output.make_paramnames_files(paramnames)
        # Delete loglike and weight columns
        # del output.samples['loglike']
        # del output.samples['weight']
        # old_cols = output.samples.columns.values.tolist()
        # output.samples.rename(columns=dict(
        #     zip(old_cols, parnames)), inplace=True)

        # Assign additional parameters to output
        output.runtime = datetime.timedelta(seconds=tf-ti)
        output.rundict = rundict.copy()
        output.datadict = dict(model.datadict)
        output.fixedpardict = dict(model.fixedpardict)
        output.model_name = str(model.model_path.stem)
        output.nlive = settings.nlive
        output.nrepeats = settings.num_repeats
        output.isodate = isodate
        output.ncores = size
        output.parnames = parnames
        output.ndim = ndim
        output.sampler = 'PolyChord'

        # Add additional information if provided
        if 'prior_names' in rundict_keys:
            output.priors = rundict['prior_names']

        if 'star_params' in rundict_keys:
            output.starparams = rundict['star_params']

        # Print run time
        print(f'\nTotal run time was: {output.runtime}')

        # Save output as pickle file
        dump2pickle_poly(output, output.file_root+'.pkl')

        base_dir_parent = str(Path(output.base_dir).parent.absolute())
        runid_dir = Path(output.base_dir).parent.parent.absolute()
        # Save model as pickle file
        shutil.copy(model.model_path, base_dir_parent)
        # dump2pickle_poly(model, 'model.pkl', savedir=base_dir_parent)

        # Copy post processing script to this run's folder
        parent = Path(__file__).parent.parent.absolute()
        shutil.copy(os.path.join(parent, 'post_processing.py'), base_dir_parent)
        # Copy FIP criterion script to parent of runid
        shutil.copy(os.path.join(parent, 'fip_criterion.py'), runid_dir)

        # Copy model file
        shutil.copy(model.model_path, base_dir_parent)

        # Run post processing script
        postprocess(base_dir_parent)

    return output
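The priordict contract described in the docstring (values exposing a .ppf method) is satisfied by scipy.stats frozen distributions; a sketch with made-up parameters, matching the planet{n}/period naming that the planet-counting code looks for:

from scipy import stats

priordict = {
    'planet1_period': stats.loguniform(1.0, 1000.0),  # days
    'planet1_k': stats.uniform(0.0, 50.0),            # m/s, U(0, 50)
    'jitter': stats.halfnorm(scale=5.0),
}

# each unit-hypercube coordinate maps through the matching ppf,
# in list(priordict.keys()) order
theta = [priordict[name].ppf(0.5) for name in priordict]  # prior medians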
Example #19
def exe():
    run_polychord(log_likelihood, nDims + 1, 0, settings, prior)
Example #20
# set an unlimited stack size for PolyChord
curlimit = resource.getrlimit(resource.RLIMIT_STACK) # get current stack resource size
resource.setrlimit(resource.RLIMIT_STACK, (resource.RLIM_INFINITY,resource.RLIM_INFINITY)) # set to unlimited

# setup run settings using the PolyChordSetting class
pargs = {'nlive': nlive,
         'precision_criterion': tol,
         'base_dir': basedir,
         'file_root': fileroot,
         'write_resume': False, # don't output a resume file
         'read_resume': False}  # don't read a resume file
settings = PolyChordSettings(ndims, nderived, **pargs)

# run nested sampling
output = pypolychord.run_polychord(loglikelihood_polychord, ndims, nderived, settings, prior_transform_polychord)

# reset stack resource size
resource.setrlimit(resource.RLIMIT_STACK, curlimit)

# output marginal likelihood
print('Marginalised evidence is {} ± {}'.format(output.logZ, output.logZerr))

# plot posterior samples (if corner.py is installed)
try:
    import matplotlib as mpl
    mpl.use("Agg") # force Matplotlib backend to Agg
    import corner # import corner.py
except ImportError:
    sys.exit(1)
Example #21
def run_polychord(loglikelihood, prior, dumper, nDims, nlive, root, ndump,
                  num_repeats):
    """Run PolyChord.

    See https://arxiv.org/abs/1506.00171 for more detail

    Parameters
    ----------
    loglikelihood: :obj:`callable`
        probability function taking a single parameter:

        - theta: numpy.array
                 physical parameters, `shape=(nDims,)`

        returning a log-likelihood (float)

    prior: :obj:`callable`
        transformation function taking a single parameter

        - cube: numpy.array
                hypercube parameters, `shape=(nDims,)`

        returning physical parameters (`numpy.array`)

    dumper: :obj:`callable`
        access function called roughly every ndump iterations giving a window
        onto the current live and dead points. Called with four arguments,
        no return:

        - live: `numpy.array` of live parameters, `shape=(nlive, nDims)`
        - live_logL: `numpy.array` of live log-likelihoods, `shape=(nlive,)`
        - dead: `numpy.array` of dead parameters
        - dead_logL: `numpy.array` of dead log-likelihoods

    nDims: int
        Dimensionality of sampling space

    nlive: int
        Number of live points

    root: str
        base name for output files

    ndump: int
        How many iterations between dumper function calls

    num_repeats: int
        Length of chain to generate new live points

    """
    import pypolychord
    from pypolychord.settings import PolyChordSettings

    nDerived = 0
    settings = PolyChordSettings(nDims, nDerived)
    settings.base_dir = os.path.dirname(root)
    settings.file_root = os.path.basename(root)
    settings.nlive = nlive
    settings.num_repeats = num_repeats
    settings.do_clustering = True
    settings.read_resume = False
    settings.compression_factor = numpy.exp(-float(ndump)/nlive)
    settings.precision_criterion = 0.01

    def polychord_loglikelihood(theta):
        return loglikelihood(theta), []

    def polychord_dumper(live, dead, logweights, logZ, logZerr):
        dumper(live[:, :-2], live[:, -1], dead[:, :-2], dead[:, -1])

    pypolychord.run_polychord(polychord_loglikelihood, nDims, nDerived,
                              settings, prior, polychord_dumper)
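Why compression_factor = exp(-ndump/nlive): each nested-sampling iteration compresses the prior volume by roughly exp(-1/nlive), so dumping whenever the volume shrinks by exp(-ndump/nlive) yields a dumper call about every ndump iterations:

import numpy as np

nlive, ndump = 500, 100
compression_factor = np.exp(-float(ndump) / nlive)
iters_between_dumps = -nlive * np.log(compression_factor)  # recovers ndump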
Example #22
def dumper(live, dead, logweights, logZ, logZerr):
    print("Last dead point:", dead[-1])


#| Initialise the settings

settings = PolyChordSettings(nDims, nDerived)
settings.file_root = 'gaussian'
settings.nlive = 200
settings.do_clustering = True
settings.read_resume = False

#| Run PolyChord

output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings,
                                   prior, dumper)

#| Create a paramnames file

paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(nDims)]
paramnames += [('r*', 'r')]
output.make_paramnames_files(paramnames)

#| Make an anesthetic plot (could also use getdist)
try:
    from anesthetic import NestedSamples
    samples = NestedSamples(root=settings.base_dir + '/' + settings.file_root)
    fig, axes = samples.plot_2d(['p0', 'p1', 'p2', 'p3', 'r'])
    fig.savefig('posterior.pdf')

except ImportError:
    pass  # anesthetic not installed; skip the plot
Example #23
def run_polychord(loglikelihood, prior, dumper, nDims, nlive, root,
                  num_repeats):
    """ Wrapper function to run PolyChord

    See https://arxiv.org/abs/1506.00171 for more detail

    Parameters
    ----------
    loglikelihood: callable
        probability function taking a single parameter:

        - theta: numpy.array
                 physical parameters, `shape=(nDims,)`

        returning a log-likelihood (float)

    prior: callable
        transformation function taking a single parameter

        - cube: numpy.array
                hypercube parameters, `shape=(nDims,)`

        returning physical parameters (`numpy.array`)

    dumper: callable
        access function called every nlive iterations giving a window onto
        current live points. Single parameter, no return:

        - live: numpy.array
               live parameters and loglikelihoods, `shape=(nlive,nDims+1)`

    nDims: int
        Dimensionality of sampling space

    nlive: int
        Number of live points

    root: str
        base name for output files

    num_repeats: int
        Length of chain to generate new live points
    """
    import pypolychord
    from pypolychord.settings import PolyChordSettings

    basedir = os.path.dirname(root)
    cluster_dir = os.path.join(basedir, 'clusters')
    with suppress(Exception):
        os.makedirs(cluster_dir)

    nDerived = 0
    settings = PolyChordSettings(nDims, nDerived)
    settings.base_dir = os.path.dirname(root)
    settings.file_root = os.path.basename(root)
    settings.nlive = nlive
    settings.num_repeats = num_repeats
    settings.do_clustering = True
    settings.read_resume = False

    def polychord_loglikelihood(theta):
        return loglikelihood(theta), []

    def polychord_dumper(live, dead, logweights, logZ, logZerr):
        dumper(live[:, :-1])

    pypolychord.run_polychord(polychord_loglikelihood, nDims, nDerived,
                              settings, prior, polychord_dumper)
Example #24
    def PC_fit(self, nlive_const='auto', dynamic=True, dynamic_goal=1.0,
               ninit=100, basename='dypc_chains', verbose=True, plot=False):
        '''
        Parameters
        ----------
        dynamic_goal : float, opt
            Parameter in [0, 1] determining whether the algorithm prioritizes
            evidence accuracy (goal near 0) or parameter estimation (goal near 1).
        ninit : int, opt
            Number of live points to use in initial exploratory run. 
        nlive_const : int, opt
            Total computational budget, equivalent to non-dynamic nested sampling with nlive_const live points.
        dynamic : bool, opt
            If True, use dynamic nested sampling via dyPolyChord. Otherwise, use the
            standard PolyChord.
        basename : str, opt
            Location in which chains will be stored. 
        verbose : bool, opt
            If True, text will be output on the terminal to check how the run is proceeding.
        plot : bool, opt
            Display some sample plots to check result of dynamic slice nested sampling. 
        '''
        if dynamic:
            print('Dynamic slice nested sampling')
        else:
            print('Slice nested sampling')
            
        # obtain maximum likelihood fits
        theta0 = self.lineModel.guessFit()
        self.result_ml = self.optimizeFit(theta0)
        self.theta_ml = self.result_ml['x']

        # save theta_ml also in the lineModel object,
        # so that constraints may be set based on ML result
        self.lineModel.theta_ml = self.theta_ml
        
        # save moments obtained from maximum likelihood optimization
        self.m_ml = self.lineModel.modelMoments(self.theta_ml)
        
        # dimensionality of the problem
        self.ndim = int(self.lineModel.thetaLength())

        if dynamic:
            # dyPolyChord (dynamic slice nested sampling)
            # ------------------------------
            try:
                import dyPolyChord.pypolychord_utils
                import dyPolyChord
            except ImportError:
                print("********************")
                print("Could not import dyPolyChord! Make sure that this is in your PYTHONPATH.")
                print("PolyChord must also be on your LD_LIBRARY_PATH")
                raise ValueError("Abort BSFC fit")
        
            #Make a callable for running dyPolyChord
            my_callable = dyPolyChord.pypolychord_utils.RunPyPolyChord(
                self.PC_loglike,
                self.lineModel.hypercube_lnprior_generalized_simplex,
                self.ndim
            )
        
            # Specify sampler settings (see run_dynamic_ns.py documentation for more details)
            settings_dict = {'file_root': 'bsfc',
                             'base_dir': basename,
                             'seed': 1}

            # Run dyPolyChord
            MPI_parallel=True
            if MPI_parallel:
                from mpi4py import MPI
                comm = MPI.COMM_WORLD
                dyPolyChord.run_dypolychord(my_callable, dynamic_goal, settings_dict,
                                            ninit=ninit,
                                            nlive_const=int(25*self.ndim) if nlive_const=='auto' else nlive_const,
                                            comm=comm)
            else:
                dyPolyChord.run_dypolychord(my_callable, dynamic_goal, settings_dict,
                                            ninit=ninit,
                                            nlive_const=int(25*self.ndim) if nlive_const=='auto' else nlive_const)
                
        else:
            # PolyChord (slice nested sampling)
            # ------------------------------
            try:
                import pypolychord
                from pypolychord.settings import PolyChordSettings
            except ImportError:
                print("********************")
                print("Could not import pypolychord! Make sure that this is in your PYTHONPATH.")
                raise ValueError("Abort BSFC fit")
            
            nDerived=0
            settings = PolyChordSettings(self.ndim, nDerived)
            settings.file_root = 'bsfc'
            settings.base_dir = basename
            settings.nlive = int(25*self.ndim) if nlive_const=='auto' else int(nlive_const)
            #settings.do_clustering = True
            #settings.read_resume = False
            settings.feedback = 3
            
            def dumper(live, dead, logweights, logZ, logZerr):
                #print("Last dead point:", dead[-1])
                print("logZ = "+str(logZ)+"+/-"+str(logZerr))
                
            self.polychord_output = pypolychord.run_polychord(self.PC_loglike,
                                               self.ndim,
                                               nDerived,
                                               settings,
                                               self.lineModel.hypercube_lnprior_generalized_simplex,
                                               dumper)

        self.good = True
Example #25
settings.do_clustering = args_params.noclust
settings.nlive = nDims * args_params.nlive
settings.base_dir = base_dir
settings.file_root = 'hd40307_k{}'.format(nplanets)  # modelpath[12:-3]
settings.num_repeats = nDims * args_params.nrep
settings.precision_criterion = args_params.prec
settings.read_resume = False

# Change settings if resume is true
if args_params.resume:
    settings.read_resume = args_params.resume
    settings.base_dir = dirname + prev_run

# Run PolyChord
output = PPC.run_polychord(logLikelihood, nDims, nDerived, settings, prior)

# Parameter names
# latexnames = [r'\sigma_J', r'C']
# for j in range(nplanets):
#     latexnames.extend(
#         [fr'K_{j}', fr'P_{j}', fr'e_{j}', fr'\omega_{j}', fr'M_{j}'])
# paramnames = [(x, latexnames[i]) for i, x in enumerate(parnames)]
paramnames = [(x, x) for x in parnames]

output.make_paramnames_files(paramnames)

end = time.time()  # End time
Dt = end - start
if rank == 0:
    print('\nTotal run time was: {}'.format(Dt))
Example #26
# set an unlimited stack size for PolyChord
curlimit = resource.getrlimit(resource.RLIMIT_STACK)  # get current stack resource size
resource.setrlimit(
    resource.RLIMIT_STACK,
    (resource.RLIM_INFINITY, resource.RLIM_INFINITY))  # set to unlimited

# setup run settings using the PolyChordSetting class
pargs = {
    'nlive': nlive,
    'precision_criterion': tol,
    'base_dir': basedir,
    'file_root': fileroot,
    'write_resume': False,  # don't output a resume file
    'read_resume': False
}  # don't read a resume file
settings = PolyChordSettings(ndims, nderived, **pargs)

# run nested sampling
output = pypolychord.run_polychord(loglikelihood_polychord, ndims, nderived,
                                   settings, prior_transform_polychord)

# reset stack resource size
resource.setrlimit(resource.RLIMIT_STACK, curlimit)

# output marginal likelihood
print('Marginalised evidence is {} ± {}'.format(output.logZ, output.logZerr))

# plot posterior samples (if corner.py is installed)
try:
    import matplotlib as mpl
    mpl.use("Agg")  # force Matplotlib backend to Agg
    import corner  # import corner.py
except ImportError:
    sys.exit(1)
Example #27
def pc(test_statistic,
       transform,
       n_dim,
       observed,
       n_live=100,
       base_dir="chains/",
       file_root="pc_",
       do_clustering=False,
       resume=False,
       ev_data=False,
       feedback=0,
       **kwargs):
    """
    Nested sampling with PC
    """
    # copy keyword arguments to the settings object
    settings = PolyChordSettings(n_dim, 0, **kwargs)
    settings.nfail = n_live
    settings.precision_criterion = 0.
    settings.read_resume = resume
    settings.base_dir = base_dir
    settings.file_root = file_root
    settings.nlive = n_live
    settings.logLstop = observed
    settings.do_clustering = do_clustering
    settings.feedback = feedback

    loglike = pc_wrap(test_statistic)
    output = pypolychord.run_polychord(loglike, n_dim, 0, settings, transform,
                                       dumper(0, observed))

    # get number of calls directly
    calls = output.nlike

    # get log X from resume file

    label = "=== local volume -- log(<X_p>) ==="
    log_xp = None
    res_name = "{}/{}.resume".format(base_dir, file_root)

    with open(res_name) as res_file:
        for line in res_file:
            if line.strip() == label:
                next_line = res_file.readline()
                log_xp = np.array([float(e) for e in next_line.split()])
                break
        else:
            raise RuntimeError("didn't find {}".format(label))

    log_x = logsumexp(log_xp)
    n_iter = -log_x * n_live

    if not ev_data:
        return Result.from_ns(n_iter, n_live, calls)

    # get ev data
    ev_name = "{}/{}_dead.txt".format(base_dir, file_root)
    ev_data = np.genfromtxt(ev_name)
    test_statistic = ev_data[:, 0]
    log_x = -np.arange(0, len(test_statistic), 1.) / n_live
    log_x_delta = np.sqrt(-log_x / n_live)

    return Result.from_ns(n_iter, n_live,
                          calls), [test_statistic, log_x, log_x_delta]
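The iteration-count estimate above inverts the same volume relation, log X ~ -n_iter/n_live; logsumexp combines the per-cluster log-volumes read from the resume file. A sketch with made-up numbers:

import numpy as np
from scipy.special import logsumexp

n_live = 100
log_xp = np.array([-3.2, -4.1])  # made-up per-cluster log-volumes
log_x = logsumexp(log_xp)        # total remaining log-volume
n_iter = -log_x * n_live         # implied number of iterations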
Example #28
from pypolychord.settings import PolyChordSettings
import pypolychord as ppc
from pypolychord.priors import UniformPrior


def quantile(cube):
    return UniformPrior(-10, 10)(cube)


def lnL(theta):
    # PolyChord expects a (logL, derived) tuple; reduce theta**2 to a scalar
    return float((theta ** 2).sum()), []


settings = PolyChordSettings(2, 0)
ppc.run_polychord(lnL, 2, 0, settings, quantile)
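run_polychord returns a PolyChordOutput object; a variant of the call above that captures it (logZ and logZerr are the same attributes used in the other examples here):

output = ppc.run_polychord(lnL, 2, 0, settings, quantile)
print(output.logZ, output.logZerr)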
Example #29
def runpoly(configfile, nlive=None, nplanets=None, modelargs={}, **kwargs):

    # Read dictionaries from configuration file
    rundict, datadict, priordict, fixeddict, priors = read_config(configfile, nplanets)
    parnames = list(priordict.keys())

    # Import model module
    models_path = os.path.join(HOME, 'run/targets/{target}/models/{runid}'.format(**rundict))
    modulename = 'model_{target}_{runid}'.format(**rundict)
    sys.path.insert(0, models_path)
    mod = importlib.import_module(modulename)

    # Instantiate model class (pass additional arguments)
    mymodel = mod.Model(fixeddict, datadict, parnames, **modelargs)

    # Function to convert from hypercube to physical parameter space
    def prior(hypercube):
        """ 
        Convert a point in the unit hypercube to the physical parameters using
        their respective priors. 
        """

        theta = []
        for i, x in enumerate(hypercube):
            theta.append(priordict[parnames[i]].ppf(x))
        return theta    

    
    # LogLikelihood
    def loglike(x):
        """ 
        Calculates the logarithm of the likelihood given the parameter vector x.
        """

        loglike.nloglike += 1 # Add one to the likelihood calculations counter
        return (mymodel.lnlike(x), [])
    loglike.nloglike = 0 # Likelihood calculations counter

    # Prepare run
    nderived = 0
    ndim = len(parnames)

    # Starting time to identify this specific run
    # Define it only for rank 0 and broadcoast to the rest
    if rank == 0:
        isodate = datetime.datetime.today().isoformat()
    else:
        isodate = None
    isodate = comm.bcast(isodate, root=0)

    # Define PolyChord settings
    settings = polysettings.PolyChordSettings(ndim, nderived)
    settings.do_clustering = True
    if nlive is None:
        settings.nlive = 25*ndim
    else:
        settings.nlive = nlive*ndim

    # Define fileroot name (identifies this specific run)
    fileroot = rundict['target']+'_'+rundict['runid']
    if rundict['comment'] != '':
        fileroot += '-' + rundict['comment']

    # Label the run with nr of planets, live points, nr of cores, sampler and date
    fileroot += '_k{}'.format(mymodel.nplanets)
    fileroot += '_nlive{}'.format(settings.nlive)
    fileroot += '_ncores{}'.format(size)
    fileroot += '_polychord'
    fileroot += '_'+isodate

    settings.file_root = fileroot
    settings.read_resume = False
    settings.num_repeats = ndim * 5
    settings.feedback = 1
    settings.precision_criterion = 0.01
    # Base directory
    ref_dir = os.path.join('ExP', rundict['target'], rundict['runid'], fileroot, 'polychains')
    if 'spectro' in HOME:
        # If it's running in the cluster -> save in scratch folder
        base_dir = os.path.join('/scratch/nunger', ref_dir)
    else:
        # Running locally
        base_dir = os.path.join(HOME, ref_dir)
    settings.base_dir = base_dir

    # Initialise clocks
    ti = time.process_time()

    # ----- Run PolyChord ------
    output = polychord.run_polychord(loglike, ndim, nderived, settings, prior)
    # --------------------------

    # Stop clocks
    tf = time.process_time()
    # Reduce clocks to min and max to get actual wall time
    ti = comm.reduce(ti, op=MPI.MIN, root=0)
    tf = comm.reduce(tf, op=MPI.MAX, root=0)

    # Gather all the number of likelihood calculations and sum to get total
    nlog = comm.reduce(loglike.nloglike, op=MPI.SUM, root=0)

    # Save results
    if rank == 0:
        # Cleanup of parameter names
        paramnames = [(x, x) for x in parnames]
        output.make_paramnames_files(paramnames)
        parnames.insert(0, 'loglike')
        parnames.insert(0, 'weight')
        old_cols = output.samples.columns.values.tolist()
        output.samples.rename(columns=dict(zip(old_cols, parnames)), inplace=True)

        # Assign additional parameters to output
        output.runtime = datetime.timedelta(seconds=tf-ti)
        output.target = rundict['target']
        output.runid = rundict['runid']
        output.comment = rundict.get('comment', '')
        output.nplanets = mymodel.nplanets
        output.nlive = settings.nlive
        output.nrepeats = settings.num_repeats
        output.isodate = isodate
        output.ncores = size
        output.priors = priors
        output.starparams = rundict['star_params']
        output.datadict = datadict
        output.parnames = parnames
        output.fixeddict = fixeddict
        output.nloglike = nlog

        # Print run time
        print('\nTotal run time was: {}'.format(output.runtime))

        # Save output as pickle file
        dump2pickle_poly(output)

        # Copy post processing script to this run's folder
        shutil.copy(os.path.join(HOME,'run/post_processing.py'), os.path.join(output.base_dir, '..'))

        # Copy model file to this run's folder
        model = os.path.join(models_path, modulename+'.py')
        shutil.copy(model, os.path.join(output.base_dir, '..'))

    return output