def drun(self, dlogz=0.1, ndim=4):
    """Simple main function for sampling."""
    # initialize our nested sampler
    sampler = dynesty.NestedSampler(self.log_likelihood, likemod.pt_te, ndim)
    sampler.run_nested(dlogz=dlogz)
    self.res = sampler.results
    self.res.summary()
    fig, _ = dyplot.runplot(self.res, lnz_error=False)
    fig1, _ = dyplot.traceplot(self.res, truths=np.zeros(ndim),
                               truth_color='black', show_titles=True,
                               trace_cmap='viridis', connect=True,
                               connect_highlight=range(10))
    fig2, _ = dyplot.cornerplot(self.res, color='blue',
                                truth_color='black', show_titles=True,
                                max_n_ticks=3, quantiles=None)
    fig.savefig('./output/evidence.png')
    fig1.savefig('./output/tracers.png')
    fig2.savefig('./output/cornerplot.png')
def fit(self, log_likelihood, start, num_dim, prior_transform,
        save_dims=None, uid=None):
    import dynesty

    filename = self.get_filename(uid)
    if os.path.exists(filename):
        self.logger.info("Not sampling, returning result from file.")
        return self.load_file(filename)
    self.logger.info("Sampling posterior now")

    if save_dims is None:
        save_dims = num_dim
    self.logger.debug("Fitting framework with %d dimensions" % num_dim)
    self.logger.info("Using dynesty Sampler")
    sampler = dynesty.NestedSampler(log_likelihood, prior_transform, num_dim,
                                    nlive=self.nlive)
    sampler.run_nested(maxiter=self.max_iter, print_progress=False)

    self.logger.debug("Fit finished")

    dresults = sampler.results
    chain = dresults["samples"]
    weights = np.exp(dresults["logwt"] - dresults["logz"][-1])
    # drop samples whose weight is negligible relative to the peak weight
    max_weight = weights.max()
    trim = max_weight / 1e5
    mask = weights > trim
    likelihood = dresults["logl"]
    self._save(chain[mask, :], weights[mask], likelihood[mask], filename,
               save_dims)
    return {
        "chain": chain[mask, :],
        "weights": weights[mask],
        "posterior": likelihood[mask],
    }
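# A hedged aside on the weighting above (not part of the original snippet):
# nested-sampling chains are importance-weighted, so downstream tools that
# expect equal-weight posterior draws need a resampling step. dynesty ships
# this as dynesty.utils.resample_equal; the helper name below is ours.
import numpy as np
from dynesty import utils as dyfunc

def to_equal_weight(dresults):
    """Resample a dynesty results object to equal-weight posterior draws."""
    weights = np.exp(dresults["logwt"] - dresults["logz"][-1])
    return dyfunc.resample_equal(dresults["samples"], weights)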
def __call__(self, kwargs=dict()):
    """
    Parameters
    ----------
    kwargs : dict
        extra input arguments controlling the sampling process,
        e.g. 'dlogz' for the stopping criterion

    Returns
    -------
    dynesty sampling results
    """
    log.debug('@ dynesty_pipeline::__call__')
    # init dynesty
    sampler = dynesty.NestedSampler(self._mpi_likelihood,
                                    self.prior,
                                    len(self._active_parameters),
                                    **self._sampling_controllers)
    sampler.run_nested(**kwargs)
    return sampler.results
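# Minimal usage sketch for the pipeline object above (the instance name
# `pipe` is hypothetical; the kwargs dict is forwarded to run_nested()):
#
#     results = pipe({'dlogz': 0.5})
#     print(results.logz[-1], results.logzerr[-1])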
def run(self, dlogz=10):
    """Main method to combine short nested sampling and Emcee"""
    ndim = len(self.labels)
    print('Initial Nested sampling')
    sampler = dynesty.NestedSampler(self._log_likelihood, likemod.pt_te,
                                    ndim, nlive=100)
    sampler.run_nested(dlogz=dlogz)
    res = sampler.results

    print("\nStarting Emcee")
    initial_value = res['samples'][-1]
    print("Initial values are:")
    for label, value in zip(self.labels, initial_value):
        print("{:s} = {:f}".format(label, value))
    self._emcee_fit(initial_value)
def _run_test(self):
    import dynesty
    import pandas as pd

    self.sampler = dynesty.NestedSampler(
        loglikelihood=self.log_likelihood,
        prior_transform=self.prior_transform,
        ndim=self.ndim,
        **self.sampler_init_kwargs)
    sampler_kwargs = self.sampler_function_kwargs.copy()
    sampler_kwargs['maxiter'] = 2
    self.sampler.run_nested(**sampler_kwargs)

    N = 100
    self.result.samples = pd.DataFrame(
        self.priors.sample(N))[self.search_parameter_keys].values
    self.result.nested_samples = self.result.samples
    self.result.log_likelihood_evaluations = np.ones(N)
    self.result.log_evidence = 1
    self.result.log_evidence_err = 0.1
    return self.result
def test_gaussianx(bound):
    ndim = 3
    rstate = get_rstate()
    g = Gaussian(ndim=ndim)
    sampler = dynesty.NestedSampler(g.loglikelihood,
                                    g.prior_transform,
                                    g.ndim,
                                    nlive=nlive,
                                    rstate=rstate,
                                    bound=bound)
    sampler.run_nested(print_progress=printing)
    results = sampler.results
    dyplot.boundplot(results,
                     dims=(0, 1)[:min(ndim, 2)],
                     it=1000,
                     prior_transform=g.prior_transform,
                     show_live=False)
    dyplot.cornerbound(results,
                       it=3000,
                       prior_transform=g.prior_transform,
                       show_live=False)
def test_unravel():
    # test unravel_run
    ndim = 2
    rstate = get_rstate()
    sampler = dynesty.NestedSampler(loglike,
                                    prior_transform,
                                    ndim,
                                    nlive=nlive,
                                    rstate=rstate)
    sampler.run_nested(print_progress=printing)
    dyutil.unravel_run(sampler.results)

    sampler = dynesty.DynamicNestedSampler(loglike,
                                           prior_transform,
                                           ndim,
                                           nlive=nlive,
                                           rstate=rstate)
    sampler.run_nested(dlogz_init=1, maxcall=1000, print_progress=printing)
    dyutil.unravel_run(sampler.results)
    logps = sampler.results.logl
    dyutil.reweight_run(sampler.results, logps / 4.)
def test_periodic():
    # hard test of dynamic sampler with high dlogz_init and small number
    # of live points
    logz_true = np.log(
        np.sqrt(2 * np.pi) * erf(win / np.sqrt(2)) / (2 * win))
    thresh = 5
    ndim = 2
    sampler = dynesty.DynamicNestedSampler(loglike,
                                           prior_transform,
                                           ndim,
                                           nlive=nlive,
                                           periodic=[0])
    sampler.run_nested(dlogz_init=1, print_progress=printing)
    assert (np.abs(sampler.results.logz[-1] - logz_true) <
            thresh * sampler.results.logzerr[-1])

    sampler = dynesty.NestedSampler(loglike,
                                    prior_transform,
                                    ndim,
                                    nlive=nlive,
                                    periodic=[0])
    sampler.run_nested(dlogz=1, print_progress=printing)
    assert (np.abs(sampler.results.logz[-1] - logz_true) <
            thresh * sampler.results.logzerr[-1])
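# A hedged note on the periodic=[0] flag used above: dynesty treats the
# listed dimensions as wrap-around in the unit cube, which suits angular
# parameters. A minimal illustrative prior transform for one periodic phase
# plus one ordinary amplitude (all names here are hypothetical):
import numpy as np

def prior_transform_phase(u):
    """Map the unit cube to (phase in [0, 2*pi), amplitude in [0, 10))."""
    phase = 2.0 * np.pi * u[0]   # dimension 0 would be declared periodic
    amplitude = 10.0 * u[1]
    return np.array([phase, amplitude])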
def test_unravel():
    # test unravel_run (the original comment here was copy-pasted from the
    # periodic test above)
    ndim = 2
    rstate = get_rstate()
    sampler = dynesty.NestedSampler(loglike,
                                    prior_transform,
                                    ndim,
                                    nlive=nlive,
                                    rstate=rstate)
    sampler.run_nested()
    dyutil.unravel_run(sampler.results)

    sampler = dynesty.DynamicNestedSampler(loglike,
                                           prior_transform,
                                           ndim,
                                           nlive=nlive,
                                           rstate=rstate)
    sampler.run_nested(dlogz_init=1, maxcall=1000)
    dyutil.unravel_run(sampler.results)
    logps = sampler.results.logl
    dyutil.reweight_run(sampler.results, logps / 4.)
def run_sampler(self):
    import dynesty
    self.sampler = dynesty.NestedSampler(
        loglikelihood=self.log_likelihood,
        prior_transform=self.prior_transform,
        ndim=self.ndim,
        **self.sampler_init_kwargs)

    if self.check_point:
        out = self._run_external_sampler_with_checkpointing()
    else:
        out = self._run_external_sampler_without_checkpointing()

    # Flushes the output to force a line break
    if self.kwargs["verbose"]:
        print("")

    # self.result.sampler_output = out
    weights = np.exp(out['logwt'] - out['logz'][-1])
    nested_samples = DataFrame(out.samples,
                               columns=self.search_parameter_keys)
    nested_samples['weights'] = weights
    nested_samples['log_likelihood'] = out.logl

    self.result.samples = dynesty.utils.resample_equal(out.samples, weights)
    self.result.nested_samples = nested_samples
    self.result.log_likelihood_evaluations = self.reorder_loglikelihoods(
        unsorted_loglikelihoods=out.logl,
        unsorted_samples=out.samples,
        sorted_samples=self.result.samples)
    self.result.log_evidence = out.logz[-1]
    self.result.log_evidence_err = out.logzerr[-1]

    if self.plot:
        self.generate_trace_plots(out)

    return self.result
def test_gaussian():
    sig = 5
    rstate = get_rstate()
    g = Gaussian()
    sampler = dynesty.NestedSampler(g.loglikelihood,
                                    g.prior_transform,
                                    g.ndim,
                                    nlive=nlive,
                                    rstate=rstate)
    sampler.run_nested(print_progress=printing)
    # check that jitter/resample work
    # for not dynamic sampler
    dyfunc.jitter_run(sampler.results, rstate=rstate)
    dyfunc.resample_run(sampler.results, rstate=rstate)

    # add samples
    # check continuation behavior
    sampler.run_nested(dlogz=0.1, print_progress=printing)

    # get errors
    nerr = 3
    result_list = []
    for i in range(nerr):
        sampler.reset()
        sampler.run_nested(print_progress=False)
        results = sampler.results
        result_list.append(results)
        pos = results.samples
        wts = np.exp(results.logwt - results.logz[-1])
        mean, cov = dyfunc.mean_and_cov(pos, wts)
        logz = results.logz[-1]
        assert (np.abs(logz - g.logz_truth) < sig * results.logzerr[-1])
    res_comb = dyfunc.merge_runs(result_list)
    assert (np.abs(res_comb.logz[-1] - g.logz_truth) <
            sig * results.logzerr[-1])
    # check summary
    res = sampler.results
    res.summary()
def test_bounding_sample(bound, sample):
    # check various bounding methods
    rstate = get_rstate()
    if bound == 'none':
        if sample != 'unif':
            g = Gaussian(0.1)
        else:
            g = Gaussian(corr=0., prior_win=10)
            # make life easy if bound is none,
            # but also not too easy so propose_point() is exercised
    else:
        g = Gaussian()
    sampler = dynesty.NestedSampler(g.loglikelihood,
                                    g.prior_transform,
                                    g.ndim,
                                    nlive=nlive,
                                    bound=bound,
                                    sample=sample,
                                    rstate=rstate)
    sampler.run_nested(print_progress=printing)
    check_results_gau(sampler.results, g, rstate)
    print(sampler.citations)
def plot(N1, prior_dist, H, dlogz_val, nlive_val, **prior_kwargs):
    plt.figure(figsize=(15, 10))
    plt.suptitle(' dlogz = ' + str(dlogz_val) + ', nlive = '
                 + str(nlive_val), fontsize=25, ha='center')
    # prior transform (constant)
    for i, n_values in enumerate(N1):
        sum_val = generator(H, n_values)  # total sum of H values

        def log_likelihood(H):
            return np.log(like_func(n_values, sum_val, H[0]))

        # same as found in the documentation:
        ndim = 1  # number of dimensions in the problem
        sampler = dynesty.NestedSampler(log_likelihood, prior_dist, ndim,
                                        bound='single', nlive=nlive_val)
        sampler.run_nested(dlogz=dlogz_val, print_progress=False)
        results = sampler.results
        x_axis = results.samples
        y_axis = np.exp(results.logl)
        plt.subplot(4, 4, i + 1)
        plt.plot(x_axis, y_axis, '.', color="b")
        plt.title('n_values = ' + str(n_values))
        # x is the sampled parameter, y the likelihood (the original labels
        # were swapped)
        plt.xlabel('No. of heads')
        plt.ylabel('likelihood')
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    # plt.savefig("gaussian_0.5_0.2" + str(dlogz_val) + str(nlive_val)
    #             + ".png", dpi=300, bbox_inches='tight')
    plt.show()
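# The helpers generator() and like_func() are not shown in this snippet.
# Under the coin-flip reading suggested by the variable names, one plausible
# (purely illustrative, not the original) implementation is a binomial
# likelihood for the head probability h given n flips and k observed heads:
from scipy.stats import binom

def like_func(n, k, h):
    """Binomial likelihood of head probability h: P(k heads | n flips, h)."""
    return binom.pmf(k, n, h)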
def test_gaussian():
    logz_tol = 1
    sampler = dynesty.NestedSampler(loglikelihood_gau,
                                    prior_transform_gau,
                                    ntotdim,
                                    nlive=nlive,
                                    ncdim=ndim_gau)
    sampler.run_nested(print_progress=printing)
    # check that jitter/resample/simulate_run work
    # for not dynamic sampler
    dyfunc.jitter_run(sampler.results)
    dyfunc.resample_run(sampler.results)
    dyfunc.simulate_run(sampler.results)

    # add samples
    # check continuation behavior
    sampler.run_nested(dlogz=0.1, print_progress=printing)

    # get errors
    nerr = 2
    result_list = []
    for i in range(nerr):
        sampler.reset()
        sampler.run_nested(print_progress=False)
        results = sampler.results
        result_list.append(results)
        pos = results.samples
        wts = np.exp(results.logwt - results.logz[-1])
        mean, cov = dyfunc.mean_and_cov(pos, wts)
        logz = results.logz[-1]
        assert (np.abs(logz - logz_truth_gau) < logz_tol)
    res_comb = dyfunc.merge_runs(result_list)
    assert (np.abs(res_comb.logz[-1] - logz_truth_gau) < logz_tol)
    # check summary
    res = sampler.results
    res.summary()
def fit_gaia_data(name, gaia_data, clobber=False):
    # We will fit for jitter parameters for each magnitude
    jitter_vars = ["G", "BP", "RP"]

    # Set up an isochrones model using the MIST tracks
    mist = isochrones.get_ichrone("mist", bands=["G", "BP", "RP"])
    mod = isochrones.SingleStarModel(mist, **gaia_data)

    # Return the existing samples if not clobbering
    output_dir = os.path.join(OUTPUT_DIR, __version__, name)
    os.makedirs(output_dir, exist_ok=True)
    fn = os.path.join(output_dir, "star.h5")
    if (not clobber) and os.path.exists(fn):
        mod._samples = pd.read_hdf(fn, "samples")
        mod._derived_samples = pd.read_hdf(fn, "derived_samples")
        return mod

    with open(os.path.join(output_dir, "gaia.json"), "w") as f:
        json.dump(
            dict((k, v.tolist()) for k, v in gaia_data.items()),
            f,
            indent=2,
            sort_keys=True,
        )

    # These functions wrap isochrones so that they can be used with dynesty:
    def prior_transform(u):
        cube = np.copy(u)
        mod.mnest_prior(cube[: mod.n_params], None, None)
        cube[mod.n_params :] = -10 + 20 * cube[mod.n_params :]
        return cube

    def loglike(theta):
        ind0 = mod.n_params
        lp0 = 0.0
        for i, k in enumerate(jitter_vars):
            err = np.sqrt(gaia_data[k][1] ** 2 + np.exp(theta[ind0 + i]))
            lp0 -= 2 * np.log(err)
            # This is to fix a bug in isochrones
            mod.kwargs[k] = (gaia_data[k][0], err)
        lp = lp0 + mod.lnpost(theta[: mod.n_params])
        if np.isfinite(lp):
            return np.clip(lp, -1e10, np.inf)
        return -1e10

    # Run nested sampling on this model
    sampler = dynesty.NestedSampler(
        loglike, prior_transform, mod.n_params + len(jitter_vars)
    )
    strt = time.time()
    sampler.run_nested()
    total_time = (time.time() - strt) / 60.0
    print("Sampling took {0} minutes".format(total_time))

    # Resample the chain to get unit weight samples and update the
    # isochrones model
    results = sampler.results
    samples = dynesty.utils.resample_equal(
        results.samples, np.exp(results.logwt - results.logz[-1])
    )
    df = mod._samples = pd.DataFrame(
        dict(
            zip(
                list(mod.param_names)
                + ["log_jitter_" + k for k in jitter_vars],
                samples.T,
            )
        )
    )
    mod._derived_samples = mod.ic(*[df[c].values for c in mod.param_names])
    mod._derived_samples["parallax"] = 1000.0 / df["distance"]
    mod._derived_samples["distance"] = df["distance"]
    mod._derived_samples["AV"] = df["AV"]

    # Save these results to disk
    mod._samples.to_hdf(fn, "samples")
    mod._derived_samples.to_hdf(fn, "derived_samples")

    # Save the summary to disk
    mod._derived_samples.describe().transpose().to_csv(
        os.path.join(output_dir, "star_summary.csv")
    )

    # Summarize the sampling performance
    summary = dict(
        nlive=int(results.nlive),
        niter=int(results.niter),
        ncall=int(sum(results.ncall)),
        eff=float(results.eff),
        logz=float(results.logz[-1]),
        logzerr=float(results.logzerr[-1]),
        total_time=float(total_time),
    )
    with open(
        os.path.join(output_dir, "star_sampling_summary.json"), "w"
    ) as f:
        json.dump(summary, f, indent=True, sort_keys=True)

    return mod, sampler
def ns_fit(datadir):
    #::: init
    config.init(datadir)

    #::: settings
    nlive = config.BASEMENT.settings[
        'ns_nlive']  # (default 500) number of live points
    bound = config.BASEMENT.settings[
        'ns_bound']  # (default 'single') use MultiNest algorithm for bounds
    ndim = config.BASEMENT.ndim  # number of parameters
    sample = config.BASEMENT.settings[
        'ns_sample']  # (default 'auto') random walk sampling
    tol = config.BASEMENT.settings[
        'ns_tol']  # (default 0.01) the stopping criterion

    #::: run
    if config.BASEMENT.settings['ns_modus'] == 'static':
        logprint('\nRunning Static Nested Sampler...')
        logprint('--------------------------')
        t0 = timer()
        if config.BASEMENT.settings['multiprocess']:
            with closing(Pool(processes=(
                    config.BASEMENT.settings['multiprocess_cores']))) as pool:
                logprint('\nRunning on',
                         config.BASEMENT.settings['multiprocess_cores'],
                         'CPUs.')
                sampler = dynesty.NestedSampler(
                    ns_lnlike,
                    ns_prior_transform,
                    ndim,
                    pool=pool,
                    queue_size=config.BASEMENT.settings['multiprocess_cores'],
                    bound=bound,
                    sample=sample,
                    nlive=nlive)
                sampler.run_nested(
                    dlogz=tol,
                    print_progress=config.BASEMENT.settings['print_progress'])
        else:
            sampler = dynesty.NestedSampler(ns_lnlike,
                                            ns_prior_transform,
                                            ndim,
                                            bound=bound,
                                            sample=sample,
                                            nlive=nlive)
            sampler.run_nested(
                dlogz=tol,
                print_progress=config.BASEMENT.settings['print_progress'])
        t1 = timer()
        timedynesty = (t1 - t0)
        logprint("\nTime taken to run 'dynesty' (in static mode) is {} hours"
                 .format(int(timedynesty / 60. / 60.)))

    elif config.BASEMENT.settings['ns_modus'] == 'dynamic':
        logprint('\nRunning Dynamic Nested Sampler...')
        logprint('--------------------------')
        t0 = timer()
        if config.BASEMENT.settings['multiprocess']:
            with closing(Pool(processes=config.BASEMENT.
                              settings['multiprocess_cores'])) as pool:
                logprint('\nRunning on',
                         config.BASEMENT.settings['multiprocess_cores'],
                         'CPUs.')
                sampler = dynesty.DynamicNestedSampler(
                    ns_lnlike,
                    ns_prior_transform,
                    ndim,
                    pool=pool,
                    queue_size=config.BASEMENT.settings['multiprocess_cores'],
                    bound=bound,
                    sample=sample)
                sampler.run_nested(
                    nlive_init=nlive,
                    dlogz_init=tol,
                    print_progress=config.BASEMENT.settings['print_progress'])
        else:
            sampler = dynesty.DynamicNestedSampler(ns_lnlike,
                                                   ns_prior_transform,
                                                   ndim,
                                                   bound=bound,
                                                   sample=sample)
            sampler.run_nested(
                nlive_init=nlive,
                print_progress=config.BASEMENT.settings['print_progress'])
        t1 = timer()
        timedynestydynamic = (t1 - t0)
        logprint("\nTime taken to run 'dynesty' (in dynamic mode) is "
                 "{:.2f} hours".format(timedynestydynamic / 60. / 60.))

    #::: pickle-save the 'results' class
    results = sampler.results
    f = gzip.GzipFile(
        os.path.join(config.BASEMENT.outdir, 'save_ns.pickle.gz'), 'wb')
    pickle.dump(results, f)
    f.close()

    #::: return a German saying
    try:
        with open(os.path.join(os.path.dirname(__file__), 'utils',
                               'quotes2.txt')) as dataset:
            return np.random.choice([l for l in dataset])
    except Exception:
        return '42'
def pyorbit_dynesty(config_in, input_datasets=None, return_output=None):

    output_directory = './' + config_in['output'] + '/dynesty/'

    mc = ModelContainerDynesty()
    pars_input(config_in, mc, input_datasets)

    if mc.nested_sampling_parameters['shutdown_jitter']:
        # .itervalues() is Python 2 only; use .values()
        for dataset in mc.dataset_dict.values():
            dataset.shutdown_jitter()

    mc.model_setup()
    mc.create_variables_bounds()
    mc.initialize_logchi2()

    mc.create_starting_point()

    results_analysis.results_resumen(mc, None, skip_theta=True)

    mc.output_directory = output_directory

    print()
    print('Reference Time Tref: ', mc.Tref)
    print()
    print('*************************************************************')
    print()

    import dynesty

    # "Standard" nested sampling.
    sampler = dynesty.NestedSampler(mc.dynesty_call, mc.dynesty_priors,
                                    mc.ndim)
    sampler.run_nested()
    results = sampler.results

    # "Dynamic" nested sampling.
    dsampler = dynesty.DynamicNestedSampler(mc.dynesty_call,
                                            mc.dynesty_priors, mc.ndim)
    dsampler.run_nested()
    dresults = dsampler.results

    from dynesty import plotting as dyplot

    # Plot a summary of the run.
    rfig, raxes = dyplot.runplot(results)

    # Plot traces and 1-D marginalized posteriors.
    tfig, taxes = dyplot.traceplot(results)

    # Plot the 2-D marginalized posteriors.
    cfig, caxes = dyplot.cornerplot(results)

    from dynesty import utils as dyfunc

    # Extract sampling results.
    samples = results.samples  # samples
    weights = np.exp(results.logwt - results.logz[-1])  # normalized weights

    # Compute 5%-95% quantiles.
    quantiles = dyfunc.quantile(samples, [0.05, 0.95], weights=weights)

    # Compute weighted mean and covariance.
    mean, cov = dyfunc.mean_and_cov(samples, weights)

    # Resample weighted samples.
    samples_equal = dyfunc.resample_equal(samples, weights)

    # Generate a new set of results with statistical+sampling uncertainties.
    results_sim = dyfunc.simulate_run(results)

    # A dummy file is created to let the cpulimit script proceed with the
    # next step
    nested_sampling_create_dummy_file(mc)

    if return_output:
        return mc
    else:
        return
    mass_sum = np.sum(param_vector)
    loglikeli = log_pdf(msqr1, DATA.m21_sqr,
                        DATA.m21_sqr_error)   # Smaller mass gap
    loglikeli += log_pdf(msqr2, DATA.m31_sqr,
                         DATA.m31_sqr_error)  # Larger mass gap
    loglikeli += log_pdf(mass_sum, DATA.sum_of_masses_offset,
                         DATA.sum_of_masses_one_sigma)
    return loglikeli


# Move to separate script
sampler = dy.NestedSampler(
    loglikelihood=evaluate_log_likelihood_of_parameters,
    prior_transform=prior_map,
    ndim=3,
    nlive=1_000,
    bound='multi',
    sample='auto')
sampler.run_nested(dlogz=0.01, maxiter=100_000)
sampler.results.summary()
# iter: 23487 | +1000 | bound: 65 | nc: 1 | ncall: 102516 | eff(%): 23.886 |
# loglstar: -inf < 22.634 < inf | logz: 3.769 +/- nan | dlogz: 0.000 > 0.010
#
# Summary
# =======
# nlive: 1000
# niter: 23487
# ncall: 102516
# eff(%): 23.886
# logz: 3.769
def runsampler(self, samplerdict):
    # pull out user defined sampler variables
    npoints = samplerdict.get('npoints', 200)
    samplertype = samplerdict.get('samplertype', 'multi')
    bootstrap = samplerdict.get('bootstrap', 0)
    update_interval = samplerdict.get('update_interval', 0.6)
    samplemethod = samplerdict.get('samplemethod', 'unif')
    delta_logz_final = samplerdict.get('delta_logz_final', 0.01)
    flushnum = samplerdict.get('flushnum', 10)
    # sys.maxint is Python 2 only; sys.maxsize is the Python 3 equivalent
    maxiter = samplerdict.get('maxiter', sys.maxsize)

    # set start time
    starttime = datetime.now()
    if self.verbose:
        print(
            'Start Dynesty w/ {0} number of samples, Ndim = {1}, and w/ '
            'stopping criteria of dlog(z) = {2}: {3}'
            .format(npoints, self.ndim, delta_logz_final, starttime))
    sys.stdout.flush()

    # initialize sampler object
    dy_sampler = dynesty.NestedSampler(
        lnprobfn,
        self.priorobj.priortrans,
        self.ndim,
        logl_args=[self.likeobj, self.priorobj],
        nlive=npoints,
        bound=samplertype,
        sample=samplemethod,
        update_interval=update_interval,
        bootstrap=bootstrap,
    )
    sys.stdout.flush()

    ncall = 0
    nit = 0

    # start sampling
    for it, results in enumerate(
            dy_sampler.sample(dlogz=delta_logz_final)):
        (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h,
         nc, worst_it, propidx, propiter, eff, delta_logz) = results

        self.outff.write('{0} '.format(it))
        self.outff.write(' '.join([str(q) for q in vstar]))
        self.outff.write(' {0} {1} {2} {3} {4} {5} {6} '.format(
            loglstar, logvol, logwt, h, nc, logz, delta_logz))
        self.outff.write('\n')

        ncall += nc
        nit = it

        if ((it % flushnum) == 0) or (it == maxiter):
            self.outff.flush()

            if self.verbose:
                # format/output results
                if logz < -1e6:
                    logz = -np.inf
                if delta_logz > 1e6:
                    delta_logz = np.inf
                if logzvar >= 0.:
                    logzerr = np.sqrt(logzvar)
                else:
                    logzerr = np.nan
                if logzerr > 1e6:
                    logzerr = np.inf
                sys.stdout.write(
                    "\riter: {0:d} | nc: {1:d} | ncall: {2:d} | "
                    "eff(%): {3:6.3f} | logz: {4:6.3f} +/- {5:6.3f} | "
                    "dlogz: {6:6.3f} > {7:6.3f} "
                    .format(nit, nc, ncall, eff, logz, logzerr,
                            delta_logz, delta_logz_final))
                sys.stdout.flush()

        if (it == maxiter):
            break

    # add live points to sampler object
    for it2, results in enumerate(dy_sampler.add_live_points()):
        # split up results
        (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h,
         nc, worst_it, boundidx, bounditer, eff, delta_logz) = results

        self.outff.write('{0} '.format(nit + it2))
        self.outff.write(' '.join([str(q) for q in vstar]))
        self.outff.write(' {0} {1} {2} {3} {4} {5} {6} '.format(
            loglstar, logvol, logwt, h, nc, logz, delta_logz))
        self.outff.write('\n')

        ncall += nc

        if self.verbose:
            # format/output results
            if logz < -1e6:
                logz = -np.inf
            if delta_logz > 1e6:
                delta_logz = np.inf
            if logzvar >= 0.:
                logzerr = np.sqrt(logzvar)
            else:
                logzerr = np.nan
            if logzerr > 1e6:
                logzerr = np.inf
            sys.stdout.write(
                "\riter: {:d} | nc: {:d} | ncall: {:d} | eff(%): {:6.3f} | "
                "logz: {:6.3f} +/- {:6.3f} | dlogz: {:6.3f} > {:6.3f} "
                .format(nit + it2, nc, ncall, eff, logz, logzerr,
                        delta_logz, delta_logz_final))
            sys.stdout.flush()

    self.outff.close()
    sys.stdout.write('\n')

    finishtime = datetime.now()
    if self.verbose:
        print('RUN TIME: {0}'.format(finishtime - starttime))

    return dy_sampler
    # global print_number
    # if print_number < 100:
    #     print_number += 1
    #     print('parameter values:', x)
    return x


# plot = True
from multiprocessing import Pool

ndim = len(theta_h2o)
full_results = []
with Pool() as pool:
    for transit_data in noisey_transit_depth:
        sampler = dynesty.NestedSampler(log_likelihood_h2o,
                                        prior_trans,
                                        ndim,
                                        nlive=500,
                                        pool=pool,
                                        queue_size=pool._processes,
                                        logl_args=(transit_data, fixed_h2o))
        sampler.run_nested()
        full_results.append(sampler.results)

if plot:
    # make a plot of results
    labels = ["Rad_planet", "T", "log H2O"]
    truths = [rad_planet, T, log_fh2o]
    for result in full_results:
        fig, axes = dyplot.cornerplot(result,
                                      truths=truths,
                                      show_titles=True,
                                      title_kwargs={'y': 1.04},
                                      labels=labels,
                                      fig=plt.subplots(len(truths),
                                                       len(truths),
                                                       figsize=(10, 10)))
        fig.suptitle('Red lines are true values', fontsize=14)
        # fig.savefig('/test/my_first_cornerplot.png')
ifit = np.abs(imjdobs - t0_est) < 365
kfit = np.abs(kmjdobs - t0_est) < 365

model_kwargs = {
    'step': 0.02,
    't0': t0_est,
    'ra': ra,
    'dec': dec,
    'motion_mode': 'parallactic',
    'occulter_mode': 'solid',
}
ecl_model_V = model_eclipse(mjd_points=vmjdobs[vfit], mu=1.20,
                            **model_kwargs)
ecl_model_I = model_eclipse(mjd_points=imjdobs[ifit], mu=1.10,
                            **model_kwargs)
ecl_model_K = model_eclipse(mjd_points=kmjdobs[kfit], mu=1.05,
                            **model_kwargs)

with Pool(processes=nproc) as pool:
    dsampler = dynesty.NestedSampler(
        lnprob,
        prior_transform,
        ndim=11,
        periodic=[7],
        pool=pool,
        queue_size=nproc,
        logl_args=[[ecl_model_V, ecl_model_I, ecl_model_K],
                   [vflux[vfit], iflux[ifit], kflux[kfit]],
                   [vflux_error[vfit], iflux_error[ifit],
                    kflux_error[kfit]]])
    dsampler.run_nested()
    dres = dsampler.results

pickle.dump(dres, open("dres_advanced_solid_corner.p", "wb"))
def fit_source(ra=53.115295, dec=-27.803501, dofit=True, nlive=100):
    # --- Build the postage stamp ----
    ra_init, dec_init = ra, dec
    pos_init = (ra_init, dec_init)
    stamps = [
        make_stamp(im, pos_init, center_type='celestial', size=(50, 50),
                   psfname=psfname) for im in imnames
    ]

    # override the psf to reflect in both directions
    T = -1.0 * np.eye(2)
    for s in stamps:
        s.psf.covariances = np.matmul(T, np.matmul(s.psf.covariances, T.T))
        s.psf.means = np.matmul(s.psf.means, T)

    # --- get the Scene ---
    source = Star(filters=["F090W"])
    scene = Scene([source])
    label = ['Counts', 'RA', 'Dec']

    plans = [WorkPlan(stamp) for stamp in stamps]
    lnlike = argfix(lnlike_multi, scene=scene, plans=plans, grad=False)

    # --- Initialize ---
    theta_init = np.array(
        [stamps[0].pixel_values.sum() * 1.0, ra_init, dec_init])
    # a rough measure of dcoordinate/dpix
    plate_scale, _ = np.linalg.eig(np.linalg.inv(stamps[0].dpix_dsky))
    # make the prior ~10 pixels wide, and 50% of counts
    theta_width = np.array(
        [0.5 * theta_init[0], 10 * plate_scale[0], 10 * plate_scale[1]])
    # print(theta_init, theta_width)

    # --- Nested sampling ---
    ndim = 3

    def prior_transform(unit_coords):
        # convert to uniform -1 to 1
        u = (2 * unit_coords - 1.)
        # now scale and shift
        theta = theta_init + theta_width * u
        return theta

    if dofit:
        import time

        import dynesty

        # "Standard" nested sampling.
        sampler = dynesty.NestedSampler(lnlike, prior_transform, ndim,
                                        nlive=nlive, bootstrap=0)
        t0 = time.time()
        sampler.run_nested()
        dur = time.time() - t0
        results = sampler.results
        results['duration'] = dur
        indmax = results['logl'].argmax()
        theta_max = results['samples'][indmax, :]
    else:
        results = None
        theta_max = np.zeros(3)
        stamps = None

    return results, theta_max, stamps, scene
    PC = Full_scale(Gs, Pmfl)
    Gchi, Pchi = Full_fit(Gs, PC * Gmfl, PC * Pmfl)
    return -0.5 * (Gchi + Pchi)


##############
############## redshift run
##############
sp = fsps.StellarPopulation(imf_type=2, tpagb_norm_type=0, zcontinuous=1,
                            logzsol=np.log10(1), sfh=4, tau=0.1)

Gs = Gen_spec(field, galaxy, 1, g102_lims=[8300, 11288],
              g141_lims=[11288, 16500], mdl_err=False,
              phot_errterm=0.03, decontam=True)

wvs, flxs, errs, beams, trans = Gather_grism_data(Gs)

zsampler = dynesty.NestedSampler(rshift_loglikelihood, rshift_prior,
                                 ndim=5, sample='rwalk', bound='balls')
zsampler.run_nested(print_progress=False)
zres = zsampler.results

t, pt = Get_posterior(zres.samples[:, 3], zres.logwt, zres.logz)
specz = t[pt == max(pt)][0]

agelim = Oldest_galaxy(specz)
LBT = Time_bins(agelim)

##############
############## bestfit run
##############
sp = fsps.StellarPopulation(imf_type=2, tpagb_norm_type=0, zcontinuous=1,
                            logzsol=np.log10(1), sfh=3, dust_type=1)

Gs = Gen_spec(field, galaxy, 1, g102_lims=[8300, 11288],
              g141_lims=[11288, 16500], mdl_err=False,
              phot_errterm=0.0, decontam=True)
def ns_fit(datadir):
    #::: init
    config.init(datadir)

    #::: show initial guess
    show_initial_guess()

    #::: settings
    nlive = config.BASEMENT.settings[
        'ns_nlive']  # (default 500) number of live points
    bound = config.BASEMENT.settings[
        'ns_bound']  # (default 'single') use MultiNest algorithm for bounds
    ndim = config.BASEMENT.ndim  # number of parameters
    sample = config.BASEMENT.settings[
        'ns_sample']  # (default 'auto') random walk sampling
    tol = config.BASEMENT.settings[
        'ns_tol']  # (default 0.01) the stopping criterion

    #::: run
    if config.BASEMENT.settings['ns_modus'] == 'static':
        logprint('\nRunning Static Nested Sampler...')
        logprint('--------------------------')
        t0 = timer()
        if config.BASEMENT.settings['multiprocess']:
            with closing(Pool(processes=(
                    config.BASEMENT.settings['multiprocess_cores']))) as pool:
                logprint('\nRunning on',
                         config.BASEMENT.settings['multiprocess_cores'],
                         'CPUs.')
                sampler = dynesty.NestedSampler(
                    ns_lnlike,
                    ns_prior_transform,
                    ndim,
                    pool=pool,
                    queue_size=config.BASEMENT.settings['multiprocess_cores'],
                    bound=bound,
                    sample=sample,
                    nlive=nlive)
                sampler.run_nested(dlogz=tol, print_progress=True)
        else:
            sampler = dynesty.NestedSampler(ns_lnlike,
                                            ns_prior_transform,
                                            ndim,
                                            bound=bound,
                                            sample=sample,
                                            nlive=nlive)
            sampler.run_nested(dlogz=tol, print_progress=True)
        t1 = timer()
        timedynesty = (t1 - t0)
        logprint("\nTime taken to run 'dynesty' (in static mode) is {} hours"
                 .format(int(timedynesty / 60. / 60.)))

    elif config.BASEMENT.settings['ns_modus'] == 'dynamic':
        logprint('\nRunning Dynamic Nested Sampler...')
        logprint('--------------------------')
        t0 = timer()
        if config.BASEMENT.settings['multiprocess']:
            with closing(Pool(processes=config.BASEMENT.
                              settings['multiprocess_cores'])) as pool:
                logprint('\nRunning on',
                         config.BASEMENT.settings['multiprocess_cores'],
                         'CPUs.')
                sampler = dynesty.DynamicNestedSampler(
                    ns_lnlike,
                    ns_prior_transform,
                    ndim,
                    pool=pool,
                    queue_size=config.BASEMENT.settings['multiprocess_cores'],
                    bound=bound,
                    sample=sample)
                sampler.run_nested(nlive_init=nlive, dlogz_init=tol,
                                   print_progress=True)
        else:
            sampler = dynesty.DynamicNestedSampler(ns_lnlike,
                                                   ns_prior_transform,
                                                   ndim,
                                                   bound=bound,
                                                   sample=sample)
            sampler.run_nested(nlive_init=nlive, print_progress=True)
        t1 = timer()
        timedynestydynamic = (t1 - t0)
        logprint("\nTime taken to run 'dynesty' (in dynamic mode) is {} hours"
                 .format(int(timedynestydynamic / 60. / 60.)))

    #::: pickle-save the 'results' class
    results = sampler.results
    with open(os.path.join(config.BASEMENT.outdir, 'save_ns.pickle'),
              'wb') as f:
        pickle.dump(results, f)
def _runsampler(self, samplerdict):
    # pull out user defined sampler variables
    npoints = samplerdict.get('npoints', 200)
    samplertype = samplerdict.get('samplerbounds', 'multi')
    bootstrap = samplerdict.get('bootstrap', 0)
    update_interval = samplerdict.get('update_interval', 0.6)
    samplemethod = samplerdict.get('samplemethod', 'unif')
    delta_logz_final = samplerdict.get('delta_logz_final', 0.01)
    flushnum = samplerdict.get('flushnum', 10)
    numslice = samplerdict.get('slices', 5)
    numwalks = samplerdict.get('walks', 25)
    reflective_list = samplerdict.get('reflective', [])

    # calc index of reflective prior par
    reflective = []
    for ii, par in enumerate(self.likeobj.fitpars_i):
        if self.fitpars_bool[par] and (par in reflective_list):
            reflective.append(ii)

    try:
        # Python 2.x
        maxiter = samplerdict.get('maxiter', sys.maxint)
    except AttributeError:
        # Python 3.x
        maxiter = samplerdict.get('maxiter', sys.maxsize)
    try:
        # Python 2.x
        maxcall = samplerdict.get('maxcall', sys.maxint)
    except AttributeError:
        # Python 3.x
        maxcall = samplerdict.get('maxcall', sys.maxsize)
    n_effective = samplerdict.get('n_effective', np.inf)

    if samplemethod == 'rwalk':
        numws = numwalks
    elif samplemethod == 'slice':
        numws = numslice
    else:
        numws = numwalks

    # set start time
    starttime = datetime.now()
    if self.verbose:
        print(
            'Static Dynesty w/ {0} sampler, {1} walks/slices, {2} number '
            'of samples, Ndim = {3}, and w/ stopping criteria of '
            'dlog(z) = {4}: {5}'.format(
                samplemethod, numws, npoints, self.ndim,
                delta_logz_final, starttime))
        print('Max Iter: {0} / Max Call: {1}'.format(maxiter, maxcall))
    sys.stdout.flush()

    # initialize sampler object
    dy_sampler = dynesty.NestedSampler(
        lnprobfn,
        self.priorobj.priortrans,
        self.ndim,
        logl_args=[self.likeobj, self.priorobj],
        nlive=npoints,
        bound=samplertype,
        sample=samplemethod,
        # update_interval=update_interval,
        bootstrap=bootstrap,
        walks=numwalks,
        slices=numslice,
        # reflective=reflective,
        # update_interval=10,
        # first_update={'min_eff': 5.0, 'min_ncall': 1000},
        # vol_dec=4.0,
        # vol_check=0.1,
    )
    sys.stdout.flush()

    ncall = 0
    nit = 0

    iter_starttime = datetime.now()
    deltaitertime_arr = []

    # start sampling
    print('Start Sampling @ {}'.format(iter_starttime))
    for it, results in enumerate(dy_sampler.sample(
            dlogz=delta_logz_final,
            maxiter=maxiter,
            maxcall=maxcall,
    )):
        (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h,
         nc, worst_it, propidx, propiter, eff, delta_logz) = results

        if it == 0:
            # initialize the output file
            parnames = self.likeobj.parsdict.keys()
            self._initoutput(parnames)

        self.outff.write('{0} '.format(it))
        # self.outff.write(' '.join([str(q) for q in vstar]))
        try:
            self.outff.write(' '.join(
                [str(self.likeobj.parsdict[q])
                 if (isinstance(self.likeobj.parsdict[q], float)
                     or isinstance(self.likeobj.parsdict[q], int))
                 else 'nan'
                 for q in parnames]))
        except Exception:
            print('Sampling broke')
            print('worst:', worst)
            print('ustar:', ustar)
            print('vstar:', vstar)
            print('loglstar:', loglstar)
            print('logvol:', logvol)
            print('logwt:', logwt)
            print('logz:', logz)
            print('logzvar:', logzvar)
            print('h:', h)
            print('nc:', nc)
            print('worst_it:', worst_it)
            print('propidx:', propidx)
            print('propiter:', propiter)
            print('eff:', eff)
            print('delta_logz:', delta_logz)
            print(self.likeobj.parsdict)
            raise

        self.outff.write(' {0} {1} {2} {3} {4} {5} {6} '.format(
            loglstar, logvol, logwt, h, nc, logz, delta_logz))
        self.outff.write('\n')

        ncall += nc
        nit = it
        deltaitertime_arr.append(
            (datetime.now() - iter_starttime).total_seconds() / float(nc))
        iter_starttime = datetime.now()

        if ((it % flushnum) == 0) or (it == maxiter):
            self.outff.flush()

            if self.verbose:
                # format/output results
                if logz < -1e6:
                    logz = -np.inf
                if delta_logz > 1e8:
                    delta_logz = np.inf
                if logzvar > 0.:
                    logzerr = np.sqrt(logzvar)
                else:
                    logzerr = np.nan
                if logzerr > 1e8:
                    logzerr = np.inf
                if loglstar < -1e6:
                    loglstar = -np.inf
                try:
                    sys.stdout.write(
                        "\riter: {0:d} | nc: {1:d} | ncall: {2:d} | "
                        "eff(%): {3:6.3f} | logz: {4:6.3f} +/- {5:6.3f} | "
                        "loglk: {6:6.3f} | dlogz: {7:6.3f} > {8:6.3f} | "
                        "mean(time): {9:7.5f} | time: {10} \n".format(
                            nit, nc, ncall, eff, logz, logzerr, loglstar,
                            delta_logz, delta_logz_final,
                            np.mean(deltaitertime_arr), datetime.now()))
                except Exception:
                    print(nit, nc, ncall, eff, logz, logzerr)
                sys.stdout.flush()
                deltaitertime_arr = []

        if (it == maxiter):
            break

    print('Add live points to output file')
    # add live points to sampler object
    for it2, results in enumerate(dy_sampler.add_live_points()):
        # split up results
        (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h,
         nc, worst_it, boundidx, bounditer, eff, delta_logz) = results

        self.outff.write('{0} '.format(nit + it2))
        # self.outff.write(' '.join([str(q) for q in vstar]))
        self.likeobj.lnlikefn(vstar)
        self.outff.write(' '.join(
            [str(self.likeobj.parsdict[q])
             if (isinstance(self.likeobj.parsdict[q], float)
                 or isinstance(self.likeobj.parsdict[q], int))
             else 'nan'
             for q in parnames]))
        # self.outff.write(' '.join([str(self.likeobj.parsdict[q])
        #                            for q in parnames]))
        self.outff.write(' {0} {1} {2} {3} {4} {5} {6} '.format(
            loglstar, logvol, logwt, h, nc, logz, delta_logz))
        self.outff.write('\n')

        ncall += nc

        if self.verbose:
            # format/output results
            if logz < -1e6:
                logz = -np.inf
            if delta_logz > 1e6:
                delta_logz = np.inf
            if logzvar > 0.:
                logzerr = np.sqrt(logzvar)
            else:
                logzerr = np.nan
            if logzerr > 1e6:
                logzerr = np.inf
            sys.stdout.write(
                "\riter: {:d} | nc: {:d} | ncall: {:d} | eff(%): {:6.3f} | "
                "logz: {:6.3f} +/- {:6.3f} | dlogz: {:6.3f} > {:6.3f} "
                .format(nit + it2, nc, ncall, eff, logz, logzerr,
                        delta_logz, delta_logz_final))
            sys.stdout.flush()

    self.outff.close()
    sys.stdout.write('\n')

    finishtime = datetime.now()
    if self.verbose:
        print('RUN TIME: {0}'.format(finishtime - starttime))

    return dy_sampler
def fit(self, sed, e_sed, parallax=[100, 0.001], nlive=250, distance=None,
        binary=False, plot_fit=True, plot_trace=False, plot_corner=False,
        progress=False, textx=0.025, textsize=12):

    if self.to_flux:
        sed = self.mag_to_flux(sed)
        e_sed = sed * e_sed  # magnitude error to flux error

    if not binary:
        ndim = 3

        def loglike(theta):
            teff, logg, plx = theta
            model = self.model_sed(teff, logg, plx)
            ivar = 1 / e_sed**2
            logchi = -0.5 * np.sum((sed - model)**2 * ivar)
            if np.isnan(logchi):
                return -np.inf
            else:
                return logchi

        def prior_transform(u):
            x = np.array(u)
            x[0] = u[0] * (self.teff_range[1] - self.teff_range[0]) \
                + self.teff_range[0]
            x[1] = u[1] * (self.logg_range[1] - self.logg_range[0]) \
                + self.logg_range[0]
            # Gaussian prior on the parallax via the inverse normal CDF
            t = stats.norm.ppf(u[2])
            x[2] = parallax[1] * t
            x[2] += parallax[0]
            return x

    elif binary:
        ndim = 5

        def loglike(theta):
            teff1, logg1, teff2, logg2, plx = theta
            model = self.model_binary_sed(teff1, logg1, teff2, logg2, plx)
            ivar = 1 / e_sed**2
            logchi = -0.5 * np.sum((sed - model)**2 * ivar)
            if np.isnan(logchi):
                return -np.inf
            elif teff1 > teff2:
                return -np.inf
            else:
                return logchi

        def prior_transform(u):
            x = np.array(u)
            x[0] = u[0] * (self.teff_range[1] - self.teff_range[0]) \
                + self.teff_range[0]
            x[1] = u[1] * (self.logg_range[1] - self.logg_range[0]) \
                + self.logg_range[0]
            x[2] = u[2] * (self.teff_range[1] - self.teff_range[0]) \
                + self.teff_range[0]
            x[3] = u[3] * (self.logg_range[1] - self.logg_range[0]) \
                + self.logg_range[0]
            t = stats.norm.ppf(u[4])
            x[4] = parallax[1] * t
            x[4] += parallax[0]
            return x

    ########## DYNESTY ###################
    dsampler = dynesty.NestedSampler(loglike, prior_transform, ndim=ndim,
                                     nlive=nlive)
    dsampler.run_nested(print_progress=progress)
    result = dsampler.results

    samples = result.samples
    weights = np.exp(result.logwt - result.logz[-1])
    chis = -2 * np.array([loglike(sample) for sample in result.samples])
    bestfit = np.argmin(chis)

    resampled = dyfunc.resample_equal(samples, weights)
    cov = np.var(resampled, axis=0)
    mean = result.samples[bestfit]
    print(result.samples[bestfit])

    bandwls = []
    for band in self.bands:
        bandwls.append(self.mean_wl[band])

    ########## PLOTTING #################
    if plot_trace:
        f = dyplot.traceplot(dsampler.results, show_titles=True,
                             trace_cmap='viridis')
        plt.tight_layout()

    if plot_corner:
        if binary:
            f = dyplot.cornerplot(dsampler.results, show_titles=True,
                                  labels=[r'$T_{\mathrm{eff,1}}$',
                                          r'$\log{g}_1$',
                                          r'$T_{\mathrm{eff,2}}$',
                                          r'$\log{g}_2$',
                                          r'$\varpi$'])
        if not binary:
            f = dyplot.cornerplot(dsampler.results, show_titles=True,
                                  labels=[r'$T_{\mathrm{eff}}$',
                                          r'$\log{g}$', r'$\varpi$'])
        plt.tight_layout()

    if not binary:
        model = self.model_sed(*mean)
        ivar = 1 / e_sed**2
        redchi = np.sum((sed - model)**2 * ivar) / (len(sed) - ndim)

        if plot_fit:
            plt.figure(figsize=(10, 5))
            plt.errorbar(bandwls, sed, yerr=e_sed, linestyle='none',
                         capsize=5, color='k')
            plt.scatter(bandwls, model, color='k')
            plt.text(textx, 0.35,
                     r'$T_{\mathrm{eff}}$ = %i ± %i'
                     % (mean[0], np.sqrt(cov[0])),
                     transform=plt.gca().transAxes, fontsize=textsize)
            plt.text(textx, 0.25,
                     r'$\log{g}$ = %.2f ± %.2f'
                     % (mean[1], np.sqrt(cov[1])),
                     transform=plt.gca().transAxes, fontsize=textsize)
            plt.text(textx, 0.15, 'atm = %s' % (self.atm_type),
                     transform=plt.gca().transAxes, fontsize=textsize)
            plt.text(textx, 0.05, r'$\chi_r^2$ = %.2f' % (redchi),
                     transform=plt.gca().transAxes, fontsize=textsize)
            plt.xlabel(r'Wavelength ($\mathrm{\AA}$)', fontsize=16)
            plt.ylabel(
                r'$f_\lambda\ [erg\ cm^{-2}\ s^{-1}\ \mathrm{\AA}^{-1}]$',
                fontsize=16)
            plt.yscale('log')

        return [mean[0], np.sqrt(cov[0]), mean[1], np.sqrt(cov[1])], redchi

    elif binary:
        model = self.model_binary_sed(*mean)
        ivar = 1 / e_sed**2
        redchi = np.sum((sed - model)**2 * ivar) / (len(sed) - ndim)

        if plot_fit:
            plt.figure(figsize=(10, 5))
            plt.errorbar(bandwls, sed, yerr=e_sed, linestyle='none',
                         capsize=5, color='k')
            plt.scatter(bandwls, model, color='k')
            plt.text(textx, 0.45,
                     r'$T_{\mathrm{eff,1}}$ = %i ± %i'
                     % (mean[0], np.sqrt(cov[0])),
                     transform=plt.gca().transAxes, fontsize=textsize)
            plt.text(textx, 0.35,
                     r'$\log{g}_1$ = %.2f ± %.2f'
                     % (mean[1], np.sqrt(cov[1])),
                     transform=plt.gca().transAxes, fontsize=textsize)
            plt.text(textx, 0.25,
                     r'$T_{\mathrm{eff,2}}$ = %i ± %i'
                     % (mean[2], np.sqrt(cov[2])),
                     transform=plt.gca().transAxes, fontsize=textsize)
            plt.text(textx, 0.15,
                     r'$\log{g}_2$ = %.2f ± %.2f'
                     % (mean[3], np.sqrt(cov[3])),
                     transform=plt.gca().transAxes, fontsize=textsize)
            # plt.text(0.15, 0.2, 'atm = %s' % (self.atm_type),
            #          transform=plt.gca().transAxes, fontsize=12)
            plt.text(textx, 0.05, r'$\chi_r^2$ = %.2f' % (redchi),
                     transform=plt.gca().transAxes, fontsize=textsize)
            plt.xlabel(r'Wavelength ($\mathrm{\AA}$)', fontsize=16)
            plt.ylabel(
                r'$f_\lambda\ [erg\ cm^{-2}\ s^{-1}\ \mathrm{\AA}^{-1}]$',
                fontsize=16)
            plt.yscale('log')

        return [mean[0], np.sqrt(cov[0]), mean[1], np.sqrt(cov[1]),
                mean[2], np.sqrt(cov[2]), mean[3], np.sqrt(cov[3])], redchi
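# A brief note on the stats.norm.ppf trick used in prior_transform above:
# nested-sampling priors are specified as maps from the unit cube, so a
# Gaussian prior N(mu, sigma) is obtained by pushing u ~ Uniform(0, 1)
# through the inverse normal CDF. Minimal standalone illustration (the
# helper name and defaults below are ours):
from scipy import stats

def gaussian_prior(u, mu=100.0, sigma=0.001):
    """Map u in (0, 1) to a draw from N(mu, sigma) via the inverse CDF."""
    return mu + sigma * stats.norm.ppf(u)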
def run_sampler(self):
    import dynesty
    import dill
    logger.info("Using dynesty version {}".format(dynesty.__version__))

    if self.kwargs.get("sample", "rwalk") == "rwalk":
        logger.info(
            "Using the bilby-implemented rwalk sample method with ACT "
            "estimated walks")
        dynesty.dynesty._SAMPLING["rwalk"] = sample_rwalk_bilby
        dynesty.nestedsamplers._SAMPLING["rwalk"] = sample_rwalk_bilby
        if self.kwargs.get("walks") > self.kwargs.get("maxmcmc"):
            raise DynestySetupError(
                "You have maxmcmc > walks (minimum mcmc)")
        if self.kwargs.get("nact", 5) < 1:
            raise DynestySetupError("Unable to run with nact < 1")
    elif self.kwargs.get("sample") == "rwalk_dynesty":
        self._kwargs["sample"] = "rwalk"
        logger.info("Using the dynesty-implemented rwalk sample method")
    elif self.kwargs.get("sample") == "rstagger_dynesty":
        self._kwargs["sample"] = "rstagger"
        logger.info("Using the dynesty-implemented rstagger sample method")

    self._setup_pool()

    if self.resume:
        self.resume = self.read_saved_state(continuing=True)

    if self.resume:
        logger.info('Resume file successfully loaded.')
    else:
        if self.kwargs['live_points'] is None:
            self.kwargs['live_points'] = (
                self.get_initial_points_from_prior(self.kwargs['nlive'])
            )
        self.sampler = dynesty.NestedSampler(
            loglikelihood=_log_likelihood_wrapper,
            prior_transform=_prior_transform_wrapper,
            ndim=self.ndim,
            **self.sampler_init_kwargs
        )

    if self.check_point:
        out = self._run_external_sampler_with_checkpointing()
    else:
        out = self._run_external_sampler_without_checkpointing()
    self._close_pool()

    # Flushes the output to force a line break
    if self.kwargs["print_progress"] and \
            self.kwargs["print_method"] == "tqdm":
        self.pbar.close()
        print("")

    check_directory_exists_and_if_not_mkdir(self.outdir)

    if self.nestcheck:
        self.nestcheck_data(out)

    dynesty_result = "{}/{}_dynesty.pickle".format(self.outdir, self.label)
    with open(dynesty_result, 'wb') as file:
        dill.dump(out, file)

    self._generate_result(out)
    self.result.sampling_time = self.sampling_time

    if self.plot:
        self.generate_trace_plots(out)

    return self.result
def test_norstate():
    # test it can work without rstate
    ndim = 2
    dynesty.NestedSampler(loglike, prior_transform, ndim, nlive=nlive)
    dynesty.DynamicNestedSampler(loglike, prior_transform, ndim, nlive=nlive)
def __init__(self, model, nlive, nprocesses=1,
             checkpoint_time_interval=None, maxcall=None,
             loglikelihood_function=None, use_mpi=False,
             no_save_state=False, run_kwds=None,
             extra_kwds=None, internal_kwds=None,
             **kwargs):
    self.model = model
    self.no_save_state = no_save_state
    log_likelihood_call, prior_call = setup_calls(
        model,
        loglikelihood_function=loglikelihood_function,
        copy_prior=True)
    # Set up the pool
    self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)

    self.maxcall = maxcall
    self.checkpoint_time_interval = checkpoint_time_interval
    self.run_kwds = {} if run_kwds is None else run_kwds
    self.extra_kwds = {} if extra_kwds is None else extra_kwds
    self.internal_kwds = {} if internal_kwds is None else internal_kwds
    self.nlive = nlive
    self.names = model.sampling_params
    self.ndim = len(model.sampling_params)
    self.checkpoint_file = None

    # Enable checkpointing if checkpoint_time_interval is set in config
    # file in sampler section
    if self.checkpoint_time_interval:
        self.run_with_checkpoint = True
        if self.maxcall is None:
            self.maxcall = 5000 * self.pool.size
        logging.info(
            "Checkpointing enabled, will verify every %s calls"
            " and try to checkpoint every %s seconds",
            self.maxcall, self.checkpoint_time_interval)
    else:
        self.run_with_checkpoint = False

    # Check for cyclic boundaries
    periodic = []
    cyclic = self.model.prior_distribution.cyclic
    for i, param in enumerate(self.variable_params):
        if param in cyclic:
            logging.info('Param: %s will be cyclic', param)
            periodic.append(i)
    if len(periodic) == 0:
        periodic = None

    # Check for reflected boundaries. Dynesty only supports
    # reflection on both min and max of boundary.
    reflective = []
    reflect = self.model.prior_distribution.well_reflected
    for i, param in enumerate(self.variable_params):
        if param in reflect:
            logging.info("Param: %s will be well reflected", param)
            reflective.append(i)
    if len(reflective) == 0:
        reflective = None

    if 'sample' in extra_kwds:
        if 'rwalk2' in extra_kwds['sample']:
            dynesty.dynesty._SAMPLING["rwalk"] = sample_rwalk_mod
            dynesty.nestedsamplers._SAMPLING["rwalk"] = sample_rwalk_mod
            extra_kwds['sample'] = 'rwalk'

    if self.nlive < 0:
        # Interpret a negative input value for the number of live points
        # (which is clearly an invalid input in all senses)
        # as the desire to dynamically determine that number
        self._sampler = dynesty.DynamicNestedSampler(log_likelihood_call,
                                                     prior_call,
                                                     self.ndim,
                                                     pool=self.pool,
                                                     reflective=reflective,
                                                     periodic=periodic,
                                                     **extra_kwds)
        self.run_with_checkpoint = False
        logging.info("Checkpointing not currently supported with"
                     " DYNAMIC nested sampler")
    else:
        self._sampler = dynesty.NestedSampler(log_likelihood_call,
                                              prior_call,
                                              self.ndim,
                                              nlive=self.nlive,
                                              reflective=reflective,
                                              periodic=periodic,
                                              pool=self.pool,
                                              **extra_kwds)
    self._sampler.kwargs.update(internal_kwds)

    # properties of the internal sampler which should not be pickled
    self.no_pickle = [
        'loglikelihood', 'prior_transform', 'propose_point',
        'update_proposal', '_UPDATE', '_PROPOSE', 'evolve_point',
        'use_pool', 'queue_size', 'use_pool_ptform', 'use_pool_logl',
        'use_pool_evolve', 'use_pool_update', 'pool', 'M'
    ]
def run_dynesty(self, data_to_fit, lnprob, nlive=200, bound='multi',
                sample='auto', maxiter=None, maxcall=None, dlogz=None,
                filepath='output.csv', **dynesty_kwargs):
    '''
    Runs nested sampling retrieval through Dynesty using the Classifier
    to inform priors

    Parameters
    ----------
    data_to_fit : array_like, shape (X,)
        The data you want to fit. Required for classification purposes.
    lnprob : function
        A function which must be passed a set of parameters and returns
        their ln likelihood. Signature should be `lnprob(params)` where
        params is an array with shape (n_variables, ). Note that you will
        need to have hard-coded the data and associated uncertainties into
        the `lnprob` function.
    nlive : int, optional
        The number of live points to use in the nested sampling. Default
        is 200.
    bound : str, optional
        Method used to approximately bound the prior using the current
        set of live points. Conditions the sampling methods used to
        propose new live points. Choices are no bound ('none'), a single
        bounding ellipsoid ('single'), multiple bounding ellipsoids
        ('multi'), balls centered on each live point ('balls'), and cubes
        centered on each live point ('cubes'). Default is 'multi'.
    sample : str, optional
        Method used to sample uniformly within the likelihood constraint,
        conditioned on the provided bounds. Unique methods available are:
        uniform sampling within the bounds ('unif'), random walks with
        fixed proposals ('rwalk'), random walks with variable
        (“staggering”) proposals ('rstagger'), multivariate slice sampling
        along preferred orientations ('slice'), “random” slice sampling
        along all orientations ('rslice'), and “Hamiltonian” slices along
        random trajectories ('hslice'). 'auto' selects the sampling method
        based on the dimensionality of the problem (from ndim). When
        ndim < 10, this defaults to 'unif'. When 10 <= ndim <= 20, this
        defaults to 'rwalk'. When ndim > 20, this defaults to 'hslice' if
        a gradient is provided and 'slice' otherwise. 'rstagger' and
        'rslice' are provided as alternatives for 'rwalk' and 'slice',
        respectively. Default is 'auto'.
    maxiter : int or None, optional
        The maximum number of iterations to run. If None, will run until
        the stopping criterion is met. Default is None.
    maxcall : int or None, optional
        If not None, sets the maximum number of calls to the likelihood
        function. Default is None.
    **dynesty_kwargs : optional
        kwargs to be passed to the dynesty.NestedSampler() initialisation

    Returns
    -------
    results : dict
        The dynesty results dictionary, with the addition of the following
        attributes:
        weights - normalised weights for each sample
        cov - the covariance matrix
        uncertainties - the uncertainty on each fitted parameter,
        calculated from the square root of the diagonal of the
        covariance matrix.
    '''
    # First up, we need to define some variables for the Retriever

    # Number of dimensions we are retrieving
    n_dims = self.classifier.n_variables + self.n_nuisance

    # Make the prior transform function
    prior_transform = self.classifier.create_dynesty_prior_transform(
        data_to_fit, self.n_nuisance, self.nuisance_limits)

    # Set up and run the sampler
    sampler = dynesty.NestedSampler(lnprob, prior_transform, n_dims,
                                    bound=bound, sample=sample,
                                    update_interval=float(n_dims),
                                    nlive=nlive, **dynesty_kwargs)
    sampler.run_nested(maxiter=maxiter, maxcall=maxcall, dlogz=dlogz)

    results = sampler.results

    # Get some normalised weights
    results.weights = np.exp(results.logwt - results.logwt.max()) / \
        np.sum(np.exp(results.logwt - results.logwt.max()))

    # Calculate a covariance matrix for these results to get uncertainties
    cov = np.cov(results.samples, rowvar=False, aweights=results.weights)

    # Get the uncertainties from the diagonal of the covariance matrix
    diagonal = np.diag(cov)
    uncertainties = np.sqrt(diagonal)

    # Add the covariance matrix and uncertainties to the results object
    results.cov = cov
    results.uncertainties = uncertainties

    self._print_best(results)
    self._save_results(results, filepath)

    return results
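# Hedged usage sketch for run_dynesty above (the instance `retriever`, the
# data array `observed_depths`, and the `truth`/`sigma` variables in the toy
# likelihood are all hypothetical):
#
#     def lnprob(params):
#         return -0.5 * np.sum(((params - truth) / sigma) ** 2)
#
#     results = retriever.run_dynesty(observed_depths, lnprob,
#                                     nlive=500, dlogz=0.1,
#                                     filepath='retrieval_output.csv')
#     print(results.uncertainties)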