def __init__(self, params, output_file, output_extra_params=[],
             nlive=None, num_repeats=None, do_clustering=None,
             read_resume=None, **kwargs):
    super().__init__(**arguments(exclude=['params']))
    self.sampled = params.find_sampled()
    settings = self.settings = PolyChordSettings(
        len(self.sampled), len(self.output_extra_params))
    # Split the output file into PolyChord's base_dir/file_root pair
    output_file = './' + osp.normpath(output_file)
    settings.base_dir = osp.dirname(output_file)
    settings.file_root = osp.basename(output_file)
    # Only the MPI master creates the output folder
    if mpi.is_master() and not osp.exists(self.settings.base_dir):
        os.makedirs(self.settings.base_dir)
    # Forward any explicitly given sampler options to the settings object
    for k in ['nlive', 'num_repeats', 'do_clustering', 'read_resume']:
        v = arguments()[k]
        if v is not None:
            setattr(settings, k, v)
    return result, []


def prior(hypercube):
    """Priors for each parameter."""
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        theta[i] = priordict[parnames[i]].ppf(x)
    return theta


# Define PolyChord settings
settings = PolyChordSettings(nDims, nDerived)
settings.do_clustering = args_params.noclust
settings.nlive = nDims * args_params.nlive
settings.base_dir = base_dir
settings.file_root = 'hd40307_k{}'.format(nplanets)  # modelpath[12:-3]
settings.num_repeats = nDims * args_params.nrep
settings.precision_criterion = args_params.prec
settings.read_resume = False

# Change settings if resume is True
if args_params.resume:
    settings.read_resume = args_params.resume
    settings.base_dir = dirname + prev_run

# Run PolyChord
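# For reference, a minimal self-contained sketch (not from the original
# script) of the inverse-CDF prior transform used in `prior` above, assuming
# `priordict` holds frozen scipy.stats distributions keyed by the names in
# `parnames`; the two demo parameters here are purely illustrative.
from scipy import stats

demo_parnames = ['period', 'amplitude']
demo_priordict = {
    'period': stats.loguniform(1.0, 1e4),   # log-uniform prior on [1, 1e4]
    'amplitude': stats.uniform(0.0, 10.0),  # uniform prior on [0, 10]
}

def demo_prior(hypercube):
    """ppf() maps a U(0,1) sample to the corresponding prior quantile."""
    return [demo_priordict[name].ppf(x)
            for name, x in zip(demo_parnames, hypercube)]

print(demo_prior([0.5, 0.5]))  # medians of each prior: [100.0, 5.0]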
# Filepath for the PolyChord data and for storing the samples
dirname = filepath
if args_params.cluster:
    dirname = '/scratch/nunger/nathan'
timecode = time.strftime("%m%d_%H%M")
folder_path = 'saved_runs/nathan_model{}_'.format(model) + timecode
if not args_params.save:
    # Save the samples in a dump folder if the data shouldn't be kept.
    # This overwrites any data previously saved for the same model.
    folder_path = 'dump'

# Define PolyChord settings
settings = PolyChordSettings(nDims, nDerived)
settings.do_clustering = args_params.noclust
settings.nlive = nDims * args_params.nlive
settings.base_dir = os.path.join(dirname, folder_path)
settings.file_root = 'nathan_model{}'.format(model)
settings.num_repeats = nDims * args_params.nrep
settings.precision_criterion = args_params.prec
settings.read_resume = False

# Change settings if resume is True
# if args_params.resume:
#     settings.read_resume = args_params.resume
#     settings.base_dir = dirname + prev_run

# Save parameter names list
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        theta[i] = priordict[parnames[i]].ppf(x)
    return theta


dirname = '/scratch/nunger/eprv3'
timecode = time.strftime("%m%d_%H%M")
folder_path = '000{}_{}a_'.format(datafile, nplanets) + timecode
if args_params.narrow:
    folder_path = '000{}_{}b_'.format(datafile, nplanets) + timecode

# Define PolyChord settings
settings = PolyChordSettings(nDims, nDerived)
settings.do_clustering = not args_params.noclust
settings.nlive = args_params.nlive
settings.num_repeats = nDims * args_params.nrep
settings.base_dir = dirname + '/chains/'
if args_params.save:
    # Save all the files from this run
    settings.base_dir = dirname + '/saved_runs/' + folder_path
    print(settings.base_dir)
settings.file_root = 'eprv3'
settings.read_resume = False
settings.write_resume = False
settings.precision_criterion = args_params.prec
def initialise(self):
    """Imports the PolyChord sampler and prepares its arguments."""
    if not get_mpi_rank():  # rank = 0 (MPI master) or None (no MPI)
        self.log.info("Initializing")
    # If path not given, try using general path to modules
    path_to_installation = get_path_to_installation()
    if not self.path and path_to_installation:
        self.path = os.path.join(path_to_installation, "code", pc_repo_name)
    if self.path:
        if not get_mpi_rank():
            self.log.info("Importing *local* PolyChord from " + self.path)
        pc_py_path = os.path.join(self.path, "PyPolyChord")
        pc_build_path = os.path.join(self.path, "build")
        post = next(d for d in os.listdir(pc_build_path) if d.startswith("lib."))
        pc_build_path = os.path.join(pc_build_path, post)
        if not os.path.exists(pc_build_path):
            self.log.error(
                "Either PolyChord is not in the given folder, "
                "'%s', or you have not compiled it.", self.path)
            raise HandledException
        # Insert the previously found path into the list of import folders
        sys.path.insert(0, pc_build_path)
        sys.path.insert(0, pc_py_path)
    else:
        self.log.info("Importing *global* PolyChord.")
    try:
        import PyPolyChord
        from PyPolyChord.settings import PolyChordSettings
    except ImportError:
        self.log.error(
            "Couldn't find the PolyChord python interface. "
            "Make sure that you have compiled it, and that you either\n"
            " (a) specify a path (you didn't) or\n"
            " (b) install the Python interface globally with\n"
            "     '/path/to/PolyChord/python setup.py install --user'")
        raise HandledException
    self.pc = PyPolyChord
    # Prepare arguments and settings
    self.nDims = self.prior.d()
    self.nDerived = (len(self.parametrization.derived_params()) + 1 +
                     len(self.likelihood._likelihoods))
    self.pc_settings = PolyChordSettings(self.nDims, self.nDerived)
    for p in ["nlive", "num_repeats", "nprior", "do_clustering",
              "precision_criterion", "max_ndead", "boost_posterior",
              "feedback", "update_files", "posteriors", "equals",
              "cluster_posteriors", "write_resume", "read_resume",
              "write_stats", "write_live", "write_dead", "base_dir",
              "grade_frac", "grade_dims"]:
        v = getattr(self, p)
        if v is not None:
            setattr(self.pc_settings, p, v)
    # Fill the automatic ones
    if getattr(self, "feedback", None) is None:
        values = {logging.CRITICAL: 0, logging.ERROR: 0, logging.WARNING: 0,
                  logging.INFO: 1, logging.DEBUG: 2}
        self.pc_settings.feedback = values[self.log.getEffectiveLevel()]
    try:
        output_folder = getattr(self.output, "folder")
        output_prefix = getattr(self.output, "prefix") or "pc"
    except AttributeError:
        # dummy output -- no resume!
        from tempfile import gettempdir
        output_folder = gettempdir()
        from random import random
        output_prefix = hex(int(random() * 16**6))[2:]
        self.pc_settings.read_resume = False
    self.pc_settings.base_dir = os.path.join(output_folder,
                                             self.pc_settings.base_dir)
    self.pc_settings.file_root = output_prefix
    if not get_mpi_rank():
        # Creating output folder, if it does not exist (just one process)
        if not os.path.exists(self.pc_settings.base_dir):
            os.makedirs(self.pc_settings.base_dir)
        # Idem, a clusters folder if needed -- notice that PolyChord's default
        # is "True", here "None", hence the funny condition below
        if self.pc_settings.do_clustering is not False:  # None means "default"
            try:
                os.makedirs(os.path.join(self.pc_settings.base_dir, clusters))
            except OSError:  # exists!
                pass
        self.log.info("Storing raw PolyChord output in '%s'.",
                      self.pc_settings.base_dir)
    # Exploiting the speed hierarchy:
    # sort blocks by parameter order and check contiguity (required by PolyChord!)
    # speeds, blocks = zip(*self.likelihood.speed_blocked_params(as_indices=True))
    # speeds, blocks = np.array(speeds), np.array(blocks)
    # # np.argsort behaves oddly when there is only 1 block
    # if len(blocks) > 1:
    #     sorting_indices = np.argsort(blocks, axis=0)
    # else:
    #     sorting_indices = [0]
    # speeds, blocks = speeds[sorting_indices], blocks[sorting_indices]
    # if np.all([np.all(block == range(block[0], block[-1]+1)) for block in blocks]):
    self.log.warning("Speed hierarchy exploitation disabled for now!")
    # self.pc_args["grade_frac"] = list(speeds)
    # self.pc_args["grade_dims"] = [len(block) for block in blocks]
    # self.log.info("Exploiting a speed hierarchy with speeds %r and blocks %r",
    #               speeds, blocks)
    # else:
    #     self.log.warning("Some speed blocks are not contiguous: PolyChord cannot "
    #                      "deal with the speed hierarchy. Not exploiting it.")
    # Prior conversion from the hypercube
    bounds = self.prior.bounds(
        confidence_for_unbounded=self.confidence_for_unbounded)
    # Check if priors are bounded (nan's to inf)
    inf = np.where(np.isinf(bounds))
    if len(inf[0]):
        params_names = self.prior.names()
        params = [params_names[i] for i in sorted(list(set(inf[0])))]
        self.log.error(
            "PolyChord needs bounded priors, but the parameter(s) '" +
            "', '".join(params) + "' is(are) unbounded.")
        raise HandledException
    locs = bounds[:, 0]
    scales = bounds[:, 1] - bounds[:, 0]
    # Affine map from the unit hypercube to the prior box
    self.pc_prior = lambda x: (locs + np.array(x) * scales).tolist()
    # We will need the volume of the prior domain, since PolyChord divides by it
    self.logvolume = np.log(np.prod(scales))
    # Done!
    if not get_mpi_rank():
        self.log.info("Calling PolyChord with arguments:")
        for p, v in inspect.getmembers(self.pc_settings,
                                       lambda a: not callable(a)):
            if not p.startswith("_"):
                self.log.info("  %s: %s", p, v)
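# For intuition, a standalone toy check (added here, not part of the class
# above) of the affine hypercube-to-prior map built from `locs` and `scales`;
# the bounds below are made up for illustration.
import numpy as np

demo_bounds = np.array([[0.0, 1.0], [-5.0, 5.0]])  # hypothetical bounded priors
demo_locs = demo_bounds[:, 0]
demo_scales = demo_bounds[:, 1] - demo_bounds[:, 0]
demo_pc_prior = lambda x: (demo_locs + np.array(x) * demo_scales).tolist()

assert demo_pc_prior([0.0, 0.0]) == [0.0, -5.0]  # lower corners of the box
assert demo_pc_prior([1.0, 1.0]) == [1.0, 5.0]   # upper corners of the box
demo_logvolume = np.log(np.prod(demo_scales))    # log(1 * 10) = log(10)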
    exponent = -np.sum((signal - model(theta, channel))**2) / (2 * sigma**2)
    logL = (-N / 2.) * np.log(2 * np.pi) - N * np.log(sigma) + exponent
    return logL, []


def prior(hypercube):
    """Uniform prior on [Tmin, Tmax]."""
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        theta[i] = UniformPrior(Tmin, Tmax)(x)
    return theta


settings = PolyChordSettings(nDims, nDerived)
settings.nlive = args_params.nlive * nDims
settings.num_repeats = args_params.nrep * nDims
settings.file_root = 'gregory'
settings.do_clustering = True
settings.read_resume = False

start = time.time()
output = PPC.run_polychord(logLikelihood, nDims, nDerived, settings, prior)
# End time
end = time.time()
Dt = end - start
print(f'\nTotal run time was: {datetime.timedelta(seconds=int(Dt))}')
print(f'\nZ = {np.exp(output.logZ)}')
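# Sanity check (added, not from the original script): the closed-form Gaussian
# log-likelihood above should agree with summing scipy's normal logpdf over
# the data points, assuming i.i.d. noise of width sigma; data here is fake.
import numpy as np
from scipy.stats import norm

_rng = np.random.default_rng(0)
_data = _rng.normal(0.0, 1.3, size=50)  # fake observations
_model = np.zeros(50)                   # fake model prediction
_sigma, _N = 1.3, 50

_exponent = -np.sum((_data - _model)**2) / (2 * _sigma**2)
_logL = (-_N / 2.) * np.log(2 * np.pi) - _N * np.log(_sigma) + _exponent
assert np.isclose(_logL, norm.logpdf(_data, _model, _sigma).sum())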
    return logL, phi


def prior(hypercube):
    """Uniform prior on [-1, 1]^D."""
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        theta[i] = UniformPrior(-1, 1)(x)
    return theta


settings = PolyChordSettings(nDims, nDerived)
settings.file_root = 'fy'
settings.do_clustering = True
settings.feedback = 1
settings.base_dir = 'yyf_mpipoly'

output = PyPolyChord.run_polychord(likelihood, nDims, nDerived, settings, prior)

paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(nDims)]
paramnames += [('r*', 'r')]
output.make_paramnames_files(paramnames)

try:
    import getdist.plots
    import matplotlib.pyplot as plt
    return result, []


def prior(hypercube):
    """Uniform prior on [Tmin, Tmax]."""
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        theta[i] = UniformPrior(Tmin, Tmax)(x)
    return theta


# Define PolyChord settings
settings = PolyChordSettings(nDims, nDerived)
settings.do_clustering = True
settings.nlive = 25 * nDims
settings.file_root = 'multidimensional'
settings.read_resume = False
# settings.num_repeats = nDims * 5

# Run PolyChord
output = PPC.run_polychord(logLikelihood, nDims, nDerived, settings, prior)

print(output.logZs)
print(output.logZerrs)
end = time.time()
print(f'Total run time was: {end - start}')
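# A more readable dump of the same output (added sketch, assuming PyPolyChord's
# PolyChordOutput attributes): with do_clustering=True, logZs and logZerrs hold
# one log-evidence (and error) per cluster found at run time, alongside the
# global logZ/logZerr.
for i, (lz, lz_err) in enumerate(zip(output.logZs, output.logZerrs)):
    print(f'cluster {i}: logZ = {lz:.2f} +/- {lz_err:.2f}')
print(f'global: logZ = {output.logZ:.2f} +/- {output.logZerr:.2f}')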
def PolyChordrun(LogLikelihood, n_dims, n_params, **kwargs):
    """
    Function author: yuanfang

    PolyChord settings.

    For full details of nested sampling and PolyChord, please refer to
    arXiv:1506.00171 [astro-ph.IM] and arXiv:0809.3437 [astro-ph].

    Parameters
    ----------
    nDims: int
        Dimensionality of the model, i.e. the number of physical parameters.
    nDerived: int
        The number of derived parameters (can be 0).

    Keyword arguments
    -----------------
    nlive: int (Default: nDims*25)
        The number of live points. Increasing nlive increases the accuracy
        of posteriors and evidences, and proportionally increases runtime
        ~ O(nlive).
    num_repeats: int (Default: nDims*5)
        The number of slice-sampling steps used to generate a new point.
        Increasing num_repeats increases the reliability of the algorithm.
        Typically:
        * for reliable evidences, num_repeats ~ O(5*nDims);
        * for reliable posteriors, num_repeats ~ O(nDims).
    nprior: int (Default: nlive)
        The number of prior samples to draw before starting compression.
    do_clustering: boolean (Default: True)
        Whether or not to use clustering at run time.
    feedback: {0,1,2,3} (Default: 1)
        Degree of command-line feedback to provide:
        -1 | nothing
         0 | just header and tail
         1 | run-time evidences and clustering information
         2 | fancy output
         3 | verbose
    precision_criterion: float (Default: 0.001)
        Termination criterion. Nested sampling terminates when the evidence
        contained in the live points is a precision_criterion fraction of
        the total evidence.
    max_ndead: int (Default: -1)
        Alternative termination criterion. Stop after max_ndead iterations.
        Set negative to ignore (default).
    boost_posterior: float (Default: 0.0)
        Increase the number of posterior samples produced. This can be set
        arbitrarily high, but you won't be able to boost by more than
        num_repeats.
        Warning: in high dimensions PolyChord produces _a lot_ of posterior
        samples. You probably don't need to change this.
    posteriors: boolean (Default: True)
        Produce (weighted) posterior samples. Stored in <root>.txt.
    equals: boolean (Default: True)
        Produce (equally weighted) posterior samples. Stored in
        <root>_equal_weights.txt.
    cluster_posteriors: boolean (Default: True)
        Produce posterior files for each cluster? Does nothing if
        do_clustering=False.
    write_resume: boolean (Default: True)
        Create a resume file.
    read_resume: boolean (Default: True)
        Read from resume file.
    write_stats: boolean (Default: True)
        Write an evidence statistics file.
    write_live: boolean (Default: True)
        Write a live points file.
    write_dead: boolean (Default: True)
        Write a dead points file.
    write_prior: boolean (Default: True)
        Write a prior points file.
    update_files: int (Default: nlive)
        How often to update the files in <base_dir>.
    base_dir: string (Default: 'chains')
        Where to store output files.
    file_root: string (Default: 'test')
        Root name of the files produced.
    grade_frac: List[float] (Default: [1])
        The amount of time to spend in each speed.
    grade_dims: List[int] (Default: [1])
        The number of parameters within each speed.
""" try: import math from numpy import pi, log, sqrt import PyPolyChord from PyPolyChord.settings import PolyChordSettings from PyPolyChord.priors import UniformPrior except ImportError: print("INFO(Scanner):PyPolyChord module import error.exit...") sys.exit() # pass settings to polychord nDerived=n_params-n_dims settings = PolyChordSettings(n_dims, nDerived) # settings.nlive = kwargs.pop('nlive', n_dims*25) settings.nlive = kwargs.pop('n_live_points', n_dims*25) # settings.num_repeats = kwargs.pop('num_repeats', n_dims*5) settings.num_repeats = kwargs.pop('num_repeats', n_dims*2) # proposed by polychord eggbox settings.nprior = kwargs.pop('nprior', -1) settings.do_clustering = kwargs.pop('do_clustering', True) settings.feedback = kwargs.pop('feedback', 1) settings.precision_criterion = kwargs.pop('precision_criterion', 0.001) settings.max_ndead = kwargs.pop('max_ndead', -1) settings.boost_posterior = kwargs.pop('boost_posterior', 0.0) settings.posteriors = kwargs.pop('posteriors', True) settings.equals = kwargs.pop('equals', True) settings.cluster_posteriors = kwargs.pop('cluster_posteriors', True) settings.write_resume = kwargs.pop('write_resume', True) settings.write_paramnames = kwargs.pop('write_paramnames', False) settings.read_resume = kwargs.pop('read_resume', True) settings.write_stats = kwargs.pop('write_stats', True) settings.write_live = kwargs.pop('write_live', True) settings.write_dead = kwargs.pop('write_dead', True) settings.write_prior = kwargs.pop('write_prior', True) settings.update_files = kwargs.pop('update_files', settings.nlive) settings.base_dir = kwargs.pop('base_dir', 'chains') settings.file_root = kwargs.pop('file_root', 'test') settings.grade_dims = kwargs.pop('grade_dims', [n_dims]) settings.max_ndead=kwargs.pop('max_ndead ',-1) ES=kwargs.pop('ES') # settings.grade_frac = kwargs.pop('grade_frac', [1.0]*len(self.grade_dims)) if kwargs: raise TypeError('Unexpected **kwargs in Contours constructor: %r' % kwargs) # to speed up, set do_clustring = false # settings.do_clustering=False def my_prior(hypercube): ''' theta: the hypercube for input parameters len(theta)=n_dims Get the phiscal parameters from hypercube ''' theta=[0.0]*n_dims for i,name in enumerate(ES.InPar): if ES.InputPar[name][1].lower() == 'flat': min = float(ES.InputPar[name][2]) max = float(ES.InputPar[name][3]) theta[i] = hypercube[i] * (max - min) + min elif ES.InputPar[name][1].lower() == 'log': min = math.log10(float(ES.InputPar[name][2])) max = math.log10(float(ES.InputPar[name][3])) theta[i] = 10.0**(hypercube[i]*(max - min) + min ) else: sf.ErrorStop( 'Not ready. Only "flat" and "log" prior can be used.' ) # print 'my_prior: theta=',theta return theta def my_Loglikelihood(theta): phi=[0.0]*nDerived derived=[0.0]*nDerived theta.extend(derived) cube=theta logL=LogLikelihood(cube,n_dims,n_params) # print 'my_Loglikelihood after log',cube phi=cube[n_dims:] # print 'my loglikelihood: phi=',phi # print 'my log likelihood: LogL=',logL return logL,phi output = PyPolyChord.run_polychord(my_Loglikelihood, n_dims, nDerived, settings, my_prior)
                  norm.logpdf(theta, loc2, sigma2))
    return float(result), []


def prior(hypercube):
    """Uniform prior on [Tmin, Tmax]."""
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        theta[i] = UniformPrior(Tmin, Tmax)(x)
    return theta


# Define PolyChord settings
settings = PolyChordSettings(nDims, nDerived)
settings.do_clustering = True
settings.nlive = 200
settings.file_root = 'bimodal'
settings.read_resume = False

start = time.time()
# Run PolyChord
output = PPC.run_polychord(logLikelihood, nDims, nDerived, settings, prior)
# End time
end = time.time()
Dt = end - start
print(f'\nTotal run time was: {datetime.timedelta(seconds=int(Dt))}')