Example #1
    def polychord_sampler(self):
        import pypolychord
        from pypolychord.settings import PolyChordSettings
        from pypolychord.priors import UniformPrior, GaussianPrior

        ndim = len(self.params.p0)
        nder = 0

        # Log-likelihood with the (logL, derived) signature PolyChord expects
        def likelihood(theta):
            return self.lnlike(theta), []  # no derived parameters (nder = 0)

        # Map the unit hypercube onto the physical parameter space
        def prior(hypercube):
            theta = []
            for h, pr in zip(hypercube, self.params.p_free_priors):
                if pr[1] == 'Gaussian':
                    theta.append(
                        GaussianPrior(float(pr[2][0]), float(pr[2][1]))(h))
                else:
                    theta.append(
                        UniformPrior(float(pr[2][0]), float(pr[2][2]))(h))
            return theta

        # Optional dumper function giving run-time read access to
        # the live points, dead points, weights and evidences
        def dumper(live, dead, logweights, logZ, logZerr):
            print("Last dead point:", dead[-1])

        settings = PolyChordSettings(ndim, nder)
        settings.base_dir = self.get_output(
            'param_chains')[:-4]  # Remove ".npz"
        settings.file_root = 'pch'
        settings.nlive = self.config['nlive']
        settings.num_repeats = self.config['nrepeat']
        settings.do_clustering = False  # Assume unimodal posterior
        settings.boost_posterior = 10  # Increase number of posterior samples
        settings.nprior = 200  # Draw nprior initial prior samples
        settings.maximise = True  # Maximize posterior at the end
        settings.read_resume = False  # Do not resume from an earlier run
        settings.feedback = 2  # Verbosity {0,1,2,3}

        output = pypolychord.run_polychord(likelihood, ndim, nder, settings,
                                           prior, dumper)

        return output
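
# A minimal standalone sketch of what the prior objects above do:
# pypolychord's priors are callables that map unit-hypercube coordinates
# onto physical parameter values.
from pypolychord.priors import UniformPrior, GaussianPrior

u = 0.5                               # coordinate drawn uniformly from [0, 1]
print(UniformPrior(-10.0, 10.0)(u))   # 0.0, the midpoint of [-10, 10]
print(GaussianPrior(0.0, 1.0)(u))     # 0.0, the median of N(0, 1)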
Example #2
    def run(self):
        """
        Executes the inference
        """
        if self.prepare():
            # Setup the inference
            ndim = np.sum(self.pstep > 0)
            settings = PolyChordSettings(ndim, 0)
            settings.base_dir = self.outputdir
            settings.file_root = self.fprefix
            settings.nlive = self.nlive
            settings.read_resume = self.resume
            if self.nrepeat is not None:
                settings.num_repeats = self.nrepeat
            settings.precision_criterion = self.dlogz
            settings.grade_dims = [int(ndim)]
            settings.feedback = self.verb
            # Run it
            if self.dumper is not None:
                out = pypolychord.run_polychord(self.loglike, ndim, 0,
                                                settings, self.prior,
                                                self.dumper)
            else:
                out = pypolychord.run_polychord(self.loglike, ndim, 0,
                                                settings, self.prior)

            # Equal-weights chain: columns are weight, -2*ln(L), parameters
            outp = np.loadtxt(os.path.join(self.outputdir, self.fprefix)
                              + '_equal_weights.txt')
            self.outp = outp[:, 2:].T
            ibest = np.argmin(outp[:, 1])  # highest-likelihood sample
            self.bestp = self.outp[:, ibest]
            # Save posterior and bestfit params
            if self.fsavefile is not None:
                np.save(self.fsavefile, self.outp)
            if self.fbestp is not None:
                np.save(self.fbestp, self.bestp)
            return self.outp, self.bestp
        else:
            if self.verb:
                print("Sampler is not fully prepared to run. " + \
                      "Correct the above errors and try again.")
Example #3
    def PC_fit(self, nlive_const='auto', dynamic=True, dynamic_goal=1.0,
               ninit=100, basename='dypc_chains', verbose=True, plot=False):
        '''
        Parameters
        ----------
        dynamic_goal : float, opt
            Parameter in [0, 1] determining whether the algorithm prioritizes
            evidence accuracy (goal near 0) or parameter estimation (goal near 1).
        ninit : int, opt
            Number of live points to use in the initial exploratory run.
        nlive_const : int, opt
            Total computational budget, equivalent to non-dynamic nested
            sampling with nlive_const live points.
        dynamic : bool, opt
            If True, use dynamic nested sampling via dyPolyChord. Otherwise, use the
            standard PolyChord.
        basename : str, opt
            Location in which chains will be stored. 
        verbose : bool, opt
            If True, text will be output on the terminal to check how the run
            is proceeding.
        plot : bool, opt
            Display some sample plots to check the result of dynamic slice
            nested sampling.
        '''
        if dynamic:
            print('Dynamic slice nested sampling')
        else:
            print('Slice nested sampling')
            
        # obtain maximum likelihood fits
        theta0 = self.lineModel.guessFit()
        self.result_ml = self.optimizeFit(theta0)
        self.theta_ml = self.result_ml['x']

        # save theta_ml also in the lineModel object,
        # so that constraints may be set based on ML result
        self.lineModel.theta_ml = self.theta_ml
        
        # save moments obtained from maximum likelihood optimization
        self.m_ml = self.lineModel.modelMoments(self.theta_ml)
        
        # dimensionality of the problem
        self.ndim = int(self.lineModel.thetaLength())

        if dynamic:
            # dyPolyChord (dynamic slice nested sampling)
            # ------------------------------
            try:
                import dyPolyChord.pypolychord_utils
                import dyPolyChord
            except ImportError:
                print("********************")
                print("Could not import dyPolyChord! Make sure that it is on your PYTHONPATH.")
                print("PolyChord must also be on your LD_LIBRARY_PATH.")
                raise ValueError("Abort BSFC fit")
        
            # Make a callable for running dyPolyChord
            my_callable = dyPolyChord.pypolychord_utils.RunPyPolyChord(
                self.PC_loglike,
                self.lineModel.hypercube_lnprior_generalized_simplex,
                self.ndim
            )
        
            # Specify sampler settings (see run_dynamic_ns.py documentation for more details)
            settings_dict = {'file_root': 'bsfc',
                             'base_dir': basename,
                             'seed': 1}

            # Run dyPolyChord
            MPI_parallel = True
            if MPI_parallel:
                from mpi4py import MPI
                comm = MPI.COMM_WORLD
                dyPolyChord.run_dypolychord(my_callable, dynamic_goal, settings_dict,
                                            ninit=ninit,
                                            nlive_const=int(25*self.ndim) if nlive_const=='auto' else nlive_const,
                                            comm=comm)
            else:
                dyPolyChord.run_dypolychord(my_callable, dynamic_goal, settings_dict,
                                            ninit=ninit,
                                            nlive_const=int(25*self.ndim) if nlive_const=='auto' else nlive_const)
                
        else:
            # PolyChord (slice nested sampling)
            # ------------------------------
            try:
                import pypolychord
                from pypolychord.settings import PolyChordSettings
            except ImportError:
                print("********************")
                print("Could not import pypolychord! Make sure that it is on your PYTHONPATH.")
                raise ValueError("Abort BSFC fit")
            
            nDerived = 0
            settings = PolyChordSettings(self.ndim, nDerived)
            settings.file_root = 'bsfc'
            settings.base_dir = basename
            settings.nlive = int(25*self.ndim) if nlive_const=='auto' else int(nlive_const)
            #settings.do_clustering = True
            #settings.read_resume = False
            settings.feedback = 3
            
            def dumper(live, dead, logweights, logZ, logZerr):
                #print("Last dead point:", dead[-1])
                print("logZ = "+str(logZ)+"+/-"+str(logZerr))
                
            self.polychord_output = pypolychord.run_polychord(
                self.PC_loglike,
                self.ndim,
                nDerived,
                settings,
                self.lineModel.hypercube_lnprior_generalized_simplex,
                dumper)

        self.good = True
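
# A hedged follow-up sketch, assuming the nestcheck package (which the
# dyPolyChord documentation recommends for processing PolyChord-format
# chains) is installed:
import nestcheck.data_processing
import nestcheck.estimators

run = nestcheck.data_processing.process_polychord_run(
    'bsfc',         # file_root used above
    'dypc_chains')  # base_dir (the default basename above)
print('log-evidence estimate:', nestcheck.estimators.logz(run))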
Example #4
# In this module I investigate the effect of the so-called Lasenby
# parameter as well as the effect of proper prior repartitioning, and
# benchmark both to check for a speedup.

import numpy
import pypolychord
import pypolychord.priors
from pypolychord.settings import PolyChordSettings

# Vanilla

mu = numpy.array([1.0, 2.5])
cov = numpy.array([[1.0, 0.6], [0.6, 1.0]])
nDims = mu.size

settings = PolyChordSettings(nDims, 0)
settings.file_root = 'vanilla'
settings.nlive = 10**3
settings.read_resume = False
settings.do_clustering = True
settings.feedback = 0


def gaussian_likelihood(theta):
    # Correlated-Gaussian log-likelihood; the empty list holds the
    # (absent) derived parameters
    invSig = numpy.linalg.inv(cov)
    norm = numpy.linalg.slogdet(2 * numpy.pi * cov)[1] / 2
    logL = -norm - (theta - mu) @ invSig @ (theta - mu) / 2
    return logL, []


def uniform_prior(point_in_hypercube):
    return pypolychord.priors.UniformPrior(-20, 20)(point_in_hypercube)
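
# Presumably the run itself is the standard pypolychord call, wiring up
# the settings, likelihood and prior defined above:
output = pypolychord.run_polychord(gaussian_likelihood, nDims, 0, settings,
                                   uniform_prior)
print('vanilla: logZ = {} +/- {}'.format(output.logZ, output.logZerr))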


# try:
#     samples = anesthetic.NestedSamples(root='./chains/vanilla')
Example #5
def pc(test_statistic,
       transform,
       n_dim,
       observed,
       n_live=100,
       base_dir="chains/",
       file_root="pc_",
       do_clustering=False,
       resume=False,
       ev_data=False,
       feedback=0,
       **kwargs):
    """
    Nested sampling with PC
    """
    # copy key word arguments to settings object
    settings = PolyChordSettings(n_dim, 0, **kwargs)
    settings.nfail = n_live
    settings.precision_criterion = 0.
    settings.read_resume = resume
    settings.base_dir = base_dir
    settings.file_root = file_root
    settings.nlive = n_live
    settings.logLstop = observed
    settings.do_clustering = do_clustering
    settings.feedback = feedback

    loglike = pc_wrap(test_statistic)
    output = pypolychord.run_polychord(loglike, n_dim, 0, settings, transform,
                                       dumper(0, observed))

    # get number of calls directly
    calls = output.nlike

    # get log X from resume file

    label = "=== local volume -- log(<X_p>) ==="
    log_xp = None
    res_name = "{}/{}.resume".format(base_dir, file_root)

    with open(res_name) as res_file:
        for line in res_file:
            if line.strip() == label:
                next_line = res_file.readline()
                log_xp = np.array([float(e) for e in next_line.split()])
                break
        else:
            raise RuntimeError("didn't find {}".format(label))

    log_x = logsumexp(log_xp)
    n_iter = -log_x * n_live

    if not ev_data:
        return Result.from_ns(n_iter, n_live, calls)

    # get ev data
    ev_name = "{}/{}_dead.txt".format(base_dir, file_root)
    ev_data = np.genfromtxt(ev_name)
    test_statistic = ev_data[:, 0]
    log_x = -np.arange(0, len(test_statistic), 1.) / n_live
    log_x_delta = np.sqrt(-log_x / n_live)

    return Result.from_ns(n_iter, n_live,
                          calls), [test_statistic, log_x, log_x_delta]
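
# pc_wrap and dumper are defined elsewhere in this module. A minimal,
# assumed sketch of pc_wrap, consistent with its use above (PolyChord
# expects loglike to return (value, derived-parameter list)):
def pc_wrap(test_statistic):
    def loglike(theta):
        return test_statistic(theta), []
    return loglike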
Example #6
# | No derived parameters are used here (nDerived = 0)

from numpy import sqrt, pi
from pypolychord.settings import PolyChordSettings

nDims = 4
nDerived = 0
sigma = 0.1
mu = 0
nlive = 72
thetamin, thetamax = -1000, 1000
betamin, betamax = 0, 1
sigmaLL = ((thetamax - thetamin) / sqrt(2 * pi))
# outputs = {}
# samples = {}
# colors = {}
normal_settings = PolyChordSettings(nDims, nDerived)
normal_settings.read_resume = False
normal_settings.feedback = 0
normal_settings.nlive = nlive

repart_settings = PolyChordSettings(nDims + 1, nDerived)
repart_settings.read_resume = False
repart_settings.feedback = 0
repart_settings.nlive = nlive


def rank_nlike_calls(outs):
    # Rank runs by number of likelihood evaluations, fewest first
    for k in sorted(outs, key=lambda x: outs[x].nlike):
        print('{} {:.2E}'.format(k, outs[k].nlike))
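
# Hedged usage sketch: rank_nlike_calls expects a mapping from run label
# to an object exposing an nlike attribute (e.g. pypolychord's
# PolyChordOutput); SimpleNamespace stands in for real outputs here.
from types import SimpleNamespace

outputs = {'normal': SimpleNamespace(nlike=12000),
           'repartitioned': SimpleNamespace(nlike=4500)}
rank_nlike_calls(outputs)
# repartitioned 4.50E+03
# normal 1.20E+04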