def compute_fit(self):
    """Run a PolyChord fit over the current fitting parameters.

    Builds the log-likelihood and prior-transform callables expected by
    pypolychord, configures the sampler, runs it and stores the processed
    solutions in ``self._polychord_output``.
    """
    self._polychord_output = None
    data = self._observed.spectrum
    datastd = self._observed.errorBar  # 1-sigma error bars on the spectrum
    sqrt_2pi = np.sqrt(2 * np.pi)
    ndim = len(self.fitting_parameters)

    def polychord_loglike(cube):
        # log-likelihood function called by polychord
        fit_params_container = np.array(
            [cube[i] for i in range(len(self.fitting_parameters))])
        chi_t = self.chisq_trans(fit_params_container, data, datastd)
        # Gaussian log-likelihood: normalisation term plus -chi^2 / 2
        loglike = -np.sum(np.log(datastd * sqrt_2pi)) - 0.5 * chi_t
        return loglike, [0.0]

    def polychord_uniform_prior(hypercube):
        # Prior transform called by polychord: maps the unit hypercube
        # onto a uniform prior within each parameter's fit boundaries.
        cube = [0.0] * ndim
        for idx, bounds in enumerate(self.fit_boundaries):
            bound_min, bound_max = bounds
            cube[idx] = (hypercube[idx] * (bound_max - bound_min)) + bound_min
        return cube

    settings = PolyChordSettings(ndim, 1)
    settings.nlive = ndim * 25
    # NOTE(review): the original assigned num_repeats twice (ndim * 5,
    # then ndim); only the last assignment took effect, so the effective
    # value ndim is kept here and the dead assignment removed.
    settings.num_repeats = ndim
    settings.do_clustering = self.do_clustering
    settings.precision_criterion = self.evidence_tolerance
    settings.logzero = -1e70
    settings.read_resume = self.resume
    settings.base_dir = self.dir_polychord
    settings.file_root = '1-'
    self.warning('Number of dimensions {}'.format(ndim))
    self.warning('Fitting parameters {}'.format(self.fitting_parameters))
    self.info('Beginning fit......')
    pypolychord.run_polychord(polychord_loglike, ndim, 1, settings,
                              polychord_uniform_prior)
    self._polychord_output = self.store_polychord_solutions()
    print(self._polychord_output)
def polychord_sampler(self):
    """Sample the posterior with PolyChord and return its output object."""
    import pypolychord
    from pypolychord.settings import PolyChordSettings
    from pypolychord.priors import UniformPrior, GaussianPrior

    ndim = len(self.params.p0)
    nder = 0  # no derived parameters

    # Log-likelihood in the (logL, derived) form PolyChord expects.
    def likelihood(theta):
        return self.lnlike(theta), [0]

    # Map unit-hypercube coordinates onto the physical priors, one entry
    # per free parameter: Gaussian when requested, uniform otherwise.
    def prior(hypercube):
        transformed = []
        for unit_val, pdef in zip(hypercube, self.params.p_free_priors):
            if pdef[1] == 'Gaussian':
                mapper = GaussianPrior(float(pdef[2][0]), float(pdef[2][1]))
            else:
                mapper = UniformPrior(float(pdef[2][0]), float(pdef[2][2]))
            transformed.append(mapper(unit_val))
        return transformed

    # Optional dumper giving run-time read access to the live points,
    # dead points, weights and evidences.
    def dumper(live, dead, logweights, logZ, logZerr):
        print("Last dead point:", dead[-1])

    settings = PolyChordSettings(ndim, nder)
    settings.base_dir = self.get_output(
        'param_chains')[:-4]           # Remove ".npz"
    settings.file_root = 'pch'
    settings.nlive = self.config['nlive']
    settings.num_repeats = self.config['nrepeat']
    settings.do_clustering = False     # Assume unimodal posterior
    settings.boost_posterior = 10      # Increase number of posterior samples
    settings.nprior = 200              # Draw nprior initial prior samples
    settings.maximise = True           # Maximize posterior at the end
    settings.read_resume = False       # Do not resume from an earlier run
    settings.feedback = 2              # Verbosity {0,1,2,3}

    return pypolychord.run_polychord(likelihood, ndim, nder,
                                     settings, prior, dumper)
def run(self):
    """
    Executes the inference

    Returns
    -------
    tuple(numpy.ndarray, numpy.ndarray) or None
        (posterior samples with shape (nparams, nsamples), best-fit
        parameter vector), or None when preparation fails.
    """
    if self.prepare():
        # Setup the inference
        ndim = np.sum(self.pstep > 0)
        settings = PolyChordSettings(ndim, 0)
        settings.base_dir = self.outputdir
        settings.file_root = self.fprefix
        settings.nlive = self.nlive
        # Honor the user's resume request. The original assigned
        # self.resume here and then unconditionally overwrote it with
        # False further down (debug leftover); the override is removed.
        settings.read_resume = self.resume
        if self.nrepeat is not None:
            # Fixed typo: the PolyChord setting is `num_repeats`, not
            # `num_repeat` (the latter was silently ignored).
            settings.num_repeats = self.nrepeat
        settings.precision_criterion = self.dlogz
        settings.grade_dims = [int(ndim)]
        settings.feedback = self.verb
        # Run it
        if self.dumper is not None:
            out = pypolychord.run_polychord(self.loglike, ndim, 0, settings,
                                            self.prior, self.dumper)
        else:
            out = pypolychord.run_polychord(self.loglike, ndim, 0, settings,
                                            self.prior)
        outp = np.loadtxt(os.path.join(self.outputdir, self.fprefix) +
                          '_equal_weights.txt')
        # Columns 2+ hold the parameter values; transpose to (ndim, nsamples).
        self.outp = outp[:, 2:].T
        # NOTE(review): assumes column 1 is a lower-is-better statistic
        # (e.g. -logL or chi2) -- confirm against the output format.
        ibest = np.argmin(outp[:, 1])
        self.bestp = self.outp[:, ibest]
        # Save posterior and bestfit params
        if self.fsavefile is not None:
            np.save(self.fsavefile, self.outp)
        if self.fbestp is not None:
            np.save(self.fbestp, self.bestp)
        return self.outp, self.bestp
    else:
        if self.verb:
            print("Sampler is not fully prepared to run. " +
                  "Correct the above errors and try again.")
def run_polychord(num_live_points):
    """Time a PolyChord run on a toy spherical-Gaussian likelihood.

    Parameters
    ----------
    num_live_points : int
        Number of live points for the nested-sampling run.

    Returns
    -------
    float
        Wall-clock run time in seconds.
    """
    try:
        import pypolychord
        from pypolychord.settings import PolyChordSettings
        from pypolychord.priors import UniformPrior
    except ImportError:
        # Catch only ImportError: the original bare `except:` would also
        # report unrelated failures (even KeyboardInterrupt) as a missing
        # installation.
        raise ImportError("Polychord not installed.\nRun `git clone https://github.com/PolyChord/PolyChordLite.git \ncd PolyChordLite\npython setup.py install`.")

    def likelihood(theta):
        """ Simple Gaussian Likelihood"""
        nDims = len(theta)
        r2 = sum(theta**2)
        logL = -log(2*pi*sigma*sigma)*nDims/2.0
        logL += -r2/2/sigma/sigma
        return logL, [r2]

    def prior(hypercube):
        """ Uniform prior from [-1,1]^D. """
        return UniformPrior(-1, 1)(hypercube)

    def dumper(live, dead, logweights, logZ, logZerr):
        # Intentionally a no-op; kept to satisfy PolyChord's interface.
        return
        # print("Last dead point:", dead[-1])

    # NOTE(review): `ndims`, `sigma`, `log`, `pi` and `default_timer` are
    # expected to be defined at module level -- confirm.
    settings = PolyChordSettings(ndims, 1)
    settings.file_root = 'gaussian'
    settings.nlive = num_live_points
    settings.do_clustering = True
    settings.read_resume = False

    t0 = default_timer()
    output = pypolychord.run_polychord(likelihood, ndims, 1,
                                       settings, prior, dumper)
    run_time = default_timer() - t0
    print("polychord log(Z):", output.logZ)
    return run_time
def PC_fit(self, nlive_const='auto', dynamic=True, dynamic_goal=1.0,
           ninit=100, basename='dypc_chains', verbose=True, plot=False):
    '''Run a (dynamic) slice nested sampling fit with (dy)PolyChord.

    Parameters
    ----------
    dynamic_goal : float, opt
        Parameter in [0,1] determining whether algorithm prioritizes
        accuracy in evidence accuracy (goal near 0) or parameter
        estimation (goal near 1).
    ninit : int, opt
        Number of live points to use in initial exploratory run.
    nlive_const : int, opt
        Total computational budget, equivalent to non-dynamic nested
        sampling with nlive_const live points.
    dynamic : bool, opt
        If True, use dynamic nested sampling via dyPolyChord. Otherwise,
        use the standard PolyChord.
    basename : str, opt
        Location in which chains will be stored.
    verbose : bool, opt
        If True, text will be output on the terminal to check how run is
        proceeding.
    plot : bool, opt
        Display some sample plots to check result of dynamic slice nested
        sampling.
    '''
    if dynamic:
        print('Dynamic slice nested sampling')
    else:
        print('Slice nested sampling')

    # obtain maximum likelihood fits
    theta0 = self.lineModel.guessFit()
    self.result_ml = self.optimizeFit(theta0)
    self.theta_ml = self.result_ml['x']

    # save theta_ml also in the lineModel object,
    # so that constraints may be set based on ML result
    self.lineModel.theta_ml = self.theta_ml

    # save moments obtained from maximum likelihood optimization
    self.m_ml = self.lineModel.modelMoments(self.theta_ml)

    # dimensionality of the problem
    self.ndim = int(self.lineModel.thetaLength())

    if dynamic:
        # dyPolyChord (dynamic slice nested sampling)
        # ------------------------------
        try:
            import dyPolyChord.pypolychord_utils
            import dyPolyChord
        except ImportError:
            # Narrowed from a bare `except:` so genuine runtime errors
            # are not misreported as a missing installation.
            print("********************")
            # Fixed: this message was split by a raw line break inside
            # the string literal (a syntax error in the original).
            print("Could not import dyPolyChord! "
                  "Make sure that this is in your PYTHONPATH.")
            print("PolyChord must also be on your LD_LIBRARY_PATH")
            raise ValueError("Abort BSFC fit")

        # Make a callable for running dyPolyChord
        my_callable = dyPolyChord.pypolychord_utils.RunPyPolyChord(
            self.PC_loglike,
            self.lineModel.hypercube_lnprior_generalized_simplex,
            self.ndim
        )

        # Specify sampler settings (see run_dynamic_ns.py documentation
        # for more details)
        settings_dict = {'file_root': 'bsfc',
                         'base_dir': basename,
                         'seed': 1}

        # Run dyPolyChord
        MPI_parallel = True
        if MPI_parallel:
            from mpi4py import MPI
            comm = MPI.COMM_WORLD
            dyPolyChord.run_dypolychord(
                my_callable, dynamic_goal, settings_dict,
                ninit=ninit,
                nlive_const=int(25 * self.ndim) if nlive_const == 'auto' else nlive_const,
                comm=comm)
        else:
            dyPolyChord.run_dypolychord(
                my_callable, dynamic_goal, settings_dict,
                ninit=ninit,
                nlive_const=int(25 * self.ndim) if nlive_const == 'auto' else nlive_const)
    else:
        # PolyChord (slice nested sampling)
        # ------------------------------
        try:
            import pypolychord
            from pypolychord.settings import PolyChordSettings
        except ImportError:
            print("********************")
            print("Could not import pypolychord! Make sure that this is in your PYTHONPATH.")
            raise ValueError("Abort BSFC fit")

        nDerived = 0
        settings = PolyChordSettings(self.ndim, nDerived)
        settings.file_root = 'bsfc'
        settings.base_dir = basename
        settings.nlive = int(25 * self.ndim) if nlive_const == 'auto' else int(nlive_const)
        #settings.do_clustering = True
        #settings.read_resume = False
        settings.feedback = 3

        def dumper(live, dead, logweights, logZ, logZerr):
            #print("Last dead point:", dead[-1])
            print("logZ = "+str(logZ)+"+/-"+str(logZerr))

        self.polychord_output = pypolychord.run_polychord(
            self.PC_loglike,
            self.ndim, nDerived, settings,
            self.lineModel.hypercube_lnprior_generalized_simplex,
            dumper)

    self.good = True
import anesthetic
from scipy.special import erfinv
import time

# In this module I'm going to investigate the effect of the so-called
# Lasenby parameter as well as the effects of proper prior
# repartitioning, and benchmark to see a speedup.

# Vanilla
mu = numpy.array([1.0, 2.5])
cov = numpy.array([[1.0, 0.6], [0.6, 1.0]])
nDims = mu.size

settings = PolyChordSettings(nDims, 0)
settings.file_root = 'vanilla'
settings.nlive = 10**3
settings.read_resume = False
settings.do_clustering = True
settings.feedback = 0

# Hoisted out of gaussian_likelihood: `cov` is a module constant, so its
# inverse and the log-normalisation are invariant and need not be
# recomputed on every likelihood call.
_inv_cov = numpy.linalg.inv(cov)
_log_norm = numpy.linalg.slogdet(2 * numpy.pi * cov)[1] / 2


def gaussian_likelihood(theta):
    """Correlated 2-D Gaussian log-likelihood; returns (logL, derived)."""
    logL = -_log_norm - (theta - mu) @ _inv_cov @ (theta - mu) / 2
    return logL, []


def uniform_prior(point_in_hypercube):
    """Uniform prior on [-20, 20] in each dimension."""
    return pypolychord.priors.UniformPrior(-20, 20)(point_in_hypercube)
""" Uniform prior from [-1,1]^D. """ return UniformPrior(-1, 1)(hypercube) #| Optional dumper function giving run-time read access to #| the live points, dead points, weights and evidences def dumper(live, dead, logweights, logZ, logZerr): print("Last dead point:", dead[-1]) #| Initialise the settings settings = PolyChordSettings(nDims, nDerived) settings.file_root = 'gaussian' settings.nlive = 200 settings.do_clustering = True settings.read_resume = False #| Run PolyChord output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings, prior, dumper) #| Create a paramnames file paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(nDims)] paramnames += [('r*', 'r')] output.make_paramnames_files(paramnames)
# param : array def prior(hypercube): """ Uniform prior from [-1,1]^D. """ return UniformPrior(-1, 1)(hypercube) # array # param : array, array, array, float, float def dumper(live, dead, logweights, logZ, logZerr): print("Last dead point:", dead[-1]) # prints last element of dead (wich is an array) settings = PolyChordSettings(nDims, nDerived) #settings is an object settings.file_root = functionName #string settings.do_clustering = True settings.read_resume = False output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings, prior, dumper) paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(nDims)] paramnames += [('r*', 'r')] output.make_paramnames_files(paramnames) try: import getdist.plots import matplotlib.pyplot as plt posterior = output.posterior g = getdist.plots.getSubplotPlotter() g.triangle_plot(posterior, filled=True)
# NOTE(review): the `def` opening this prior-transform function lies
# outside this chunk; the indented lines below are the tail of its body.
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        # Map each unit-hypercube coordinate through the parameter's
        # inverse CDF (ppf) to obtain its physical value.
        theta[i] = priordict[parnames[i]].ppf(x)
    return theta

# Define PolyChord settings
settings = PolyChordSettings(
    nDims,
    nDerived,
)
# NOTE(review): assigning `noclust` to `do_clustering` looks inverted --
# confirm the meaning of args_params.noclust.
settings.do_clustering = args_params.noclust
settings.nlive = nDims * args_params.nlive
settings.base_dir = base_dir
settings.file_root = 'hd40307_k{}'.format(nplanets)  # modelpath[12:-3]
settings.num_repeats = nDims * args_params.nrep
settings.precision_criterion = args_params.prec
settings.read_resume = False

# Change settings if resume is true
if args_params.resume:
    settings.read_resume = args_params.resume
    settings.base_dir = dirname + prev_run

# Run PolyChord
output = PPC.run_polychord(logLikelihood, nDims, nDerived, settings, prior)

# Parameter names
# latexnames = [r'\sigma_J', r'C']
# for j in range(nplanets):
# return UniformPrior(-5, 5)(cube) def log_likelihood(theta): x = theta[:-1] beta = theta[-1:].item() # norm = norm = numpy.log(numpy.product(numpy.diagonal(numpy.pi * 2 * cov))) / 2 gaussian = -norm - numpy.sum(((x - mu_pi) / sig_prior)**2) / 2 # return (2-beta)*(gaussian) + numpy.log(zed(beta)), [] # print(gaussian, log_zed(beta), beta) return (2 - beta) * gaussian + log_zed(beta), [] # return gaussian, [] # return -numpy.linalg.slogdet(cov)[1]/2 - (x - mu_pi)@ numpy.linalg.inv(cov)@(x - mu_pi)/2, [] settings = PolyChordSettings(nDims + 1, 0) settings.file_root = 'ppr' settings.nlive = 200 settings.do_clustering = True settings.read_resume = False def exe(): run_polychord(log_likelihood, nDims + 1, 0, settings, prior) samples = anesthetic.NestedSamples(root='./chains/ppr') # floating point overflow: # zed(0.5) = -inf
def run_polychord(loglikelihood, prior, dumper, nDims, nlive, root, ndump,
                  num_repeats):
    """Run PolyChord.

    See https://arxiv.org/abs/1506.00171 for more detail

    Parameters
    ----------
    loglikelihood: :obj:`callable`
        probability function taking a single parameter:

        - theta: numpy.array
                 physical parameters, `shape=(nDims,)`

        returning a log-likelihood (float)

    prior: :obj:`callable`
        tranformation function taking a single parameter

        - cube: numpy.array
                hypercube parameters, `shape=(nDims,)`

        returning physical parameters (`numpy.array`)

    dumper: :obj:`callable`
        access function called periodically (roughly every ndump
        iterations) giving a window onto the current state. Called with
        four parameters, no return:

        - live_params: numpy.array of live-point parameters
        - live_logls: numpy.array of live-point log-likelihoods
        - dead_params: numpy.array of dead-point parameters
        - dead_logls: numpy.array of dead-point log-likelihoods

        (The original docstring described a single-parameter callback,
        which did not match how it is invoked below.)

    nDims: int
        Dimensionality of sampling space

    nlive: int
        Number of live points

    root: str
        base name for output files

    ndump: int
        How many iterations between dumper function calls

    num_repeats: int
        Length of chain to generate new live points
    """
    import pypolychord
    from pypolychord.settings import PolyChordSettings

    nDerived = 0
    settings = PolyChordSettings(nDims, nDerived)
    settings.base_dir = os.path.dirname(root)
    settings.file_root = os.path.basename(root)
    settings.nlive = nlive
    settings.num_repeats = num_repeats
    settings.do_clustering = True
    settings.read_resume = False
    # Dump roughly every ndump iterations: PolyChord triggers the dumper
    # when the prior volume has compressed by this factor.
    settings.compression_factor = numpy.exp(-float(ndump)/nlive)
    settings.precision_criterion = 0.01

    def polychord_loglikelihood(theta):
        # PolyChord expects (logL, derived); there are no derived params.
        return loglikelihood(theta), []

    def polychord_dumper(live, dead, logweights, logZ, logZerr):
        # NOTE(review): assumes the last two columns of live/dead are
        # bookkeeping plus logL -- confirm against the PolyChord version.
        dumper(live[:, :-2], live[:, -1], dead[:, :-2], dead[:, -1])

    pypolychord.run_polychord(polychord_loglikelihood, nDims, nDerived,
                              settings, prior, polychord_dumper)
def pc(test_statistic, transform, n_dim, observed,
       n_live=100, base_dir="chains/", file_root="pc_", do_clustering=False,
       resume=False, ev_data=False, feedback=0, **kwargs):
    """
    Nested sampling with PC

    Parameters
    ----------
    test_statistic : callable
        Test statistic wrapped by `pc_wrap` into a PolyChord likelihood.
    transform : callable
        Prior transform from the unit hypercube to physical parameters.
    n_dim : int
        Dimensionality of the sampling space.
    observed : float
        Observed value of the test statistic; sampling stops at this logL.
    ev_data : bool, optional
        If True, additionally return the dead-point evidence data.
    **kwargs
        Forwarded to `PolyChordSettings`.

    Returns
    -------
    Result, or (Result, [test_statistic, log_x, log_x_delta])
    """
    # copy key word arguments to settings object
    settings = PolyChordSettings(n_dim, 0, **kwargs)
    settings.nfail = n_live
    settings.precision_criterion = 0.
    settings.read_resume = resume
    settings.base_dir = base_dir
    settings.file_root = file_root
    settings.nlive = n_live
    settings.logLstop = observed
    settings.do_clustering = do_clustering
    settings.feedback = feedback

    loglike = pc_wrap(test_statistic)
    output = pypolychord.run_polychord(
        loglike, n_dim, 0, settings, transform, dumper(0, observed))

    # get number of calls directly
    calls = output.nlike

    # get log X from resume file
    label = "=== local volume -- log(<X_p>) ==="
    log_xp = None
    res_name = "{}/{}.resume".format(base_dir, file_root)
    with open(res_name) as res_file:
        for line in res_file:
            if line.strip() == label:
                next_line = res_file.readline()
                log_xp = np.array([float(e) for e in next_line.split()])
                break
        else:
            raise RuntimeError("didn't find {}".format(label))

    log_x = logsumexp(log_xp)
    n_iter = -log_x * n_live

    if not ev_data:
        return Result.from_ns(n_iter, n_live, calls)

    # get ev data -- fresh names below rather than shadowing the
    # `ev_data` and `test_statistic` parameters as the original did
    ev_name = "{}/{}_dead.txt".format(base_dir, file_root)
    dead_points = np.genfromtxt(ev_name)
    ts_vals = dead_points[:, 0]
    log_x = -np.arange(0, len(ts_vals), 1.) / n_live
    log_x_delta = np.sqrt(-log_x / n_live)

    return Result.from_ns(n_iter, n_live, calls), [ts_vals, log_x, log_x_delta]
def run_polychord(loglikelihood, prior, dumper, nDims, nlive, root,
                  num_repeats):
    """Thin wrapper launching a PolyChord run.

    See https://arxiv.org/abs/1506.00171 for more detail

    Parameters
    ----------
    loglikelihood: callable
        maps physical parameters theta (numpy.array, `shape=(nDims,)`)
        to a scalar log-likelihood.
    prior: callable
        maps hypercube coordinates (numpy.array, `shape=(nDims,)`)
        to physical parameters (numpy.array).
    dumper: callable
        called every nlive iterations with the current live points,
        numpy.array of `shape=(nlive, nDims+1)`; no return value.
    nDims: int
        dimensionality of the sampling space.
    nlive: int
        number of live points.
    root: str
        base name for output files.
    num_repeats: int
        length of chain used to generate new live points.
    """
    import pypolychord
    from pypolychord.settings import PolyChordSettings

    base_dir = os.path.dirname(root)
    # Pre-create the clusters subdirectory; failures (e.g. it already
    # exists) are deliberately ignored, matching the best-effort intent.
    with suppress(Exception):
        os.makedirs(os.path.join(base_dir, 'clusters'))

    settings = PolyChordSettings(nDims, 0)
    settings.base_dir = base_dir
    settings.file_root = os.path.basename(root)
    settings.nlive = nlive
    settings.num_repeats = num_repeats
    settings.do_clustering = True
    settings.read_resume = False

    def wrapped_loglikelihood(theta):
        # PolyChord expects a (logL, derived) pair; no derived parameters.
        return loglikelihood(theta), []

    def wrapped_dumper(live, dead, logweights, logZ, logZerr):
        # Hand the live points (minus the trailing column) to `dumper`.
        dumper(live[:, :-1])

    pypolychord.run_polychord(wrapped_loglikelihood, nDims, 0,
                              settings, prior, wrapped_dumper)