def __call__(self, kwargs=None):
    """
    Runs the MultiNest sampler on the pipeline's likelihood and prior.

    Parameters
    ----------
    kwargs : dict, optional
        Dummy argument. Unused. (Previously defaulted to a mutable
        ``dict()``, which is shared across calls; ``None`` avoids that.)

    Returns
    -------
    results : dict
        pyMultinest sampling results in a dictionary containing the keys:
        logZ (the log-evidence), logZerror (the error in log-evidence)
        and samples (equal weighted posterior)
    """
    log.debug('@ multinest_pipeline::__call__')

    # Checks whether a base name for multinest output files was specified;
    # if not, uses the default location
    if 'outputfiles_basename' not in self._sampling_controllers:
        self._sampling_controllers[
            'outputfiles_basename'] = 'chains/imagine_'

    # Makes sure the output directory exists. Unlike the previous
    # hard-coded makedirs('chains') + assert, this also works when the
    # caller supplied a custom basename in a not-yet-existing directory.
    basedir = os.path.split(
        self._sampling_controllers['outputfiles_basename'])[0]
    if basedir:
        os.makedirs(basedir, exist_ok=True)

    # Runs pyMultinest
    results = pymultinest.solve(LogLikelihood=self._mpi_likelihood,
                                Prior=self.prior,
                                n_dims=len(self._active_parameters),
                                **self._sampling_controllers)
    return results
def run_sampler(self):
    """Run MultiNest and populate ``self.result``.

    Prepares the run directory and sampling-time bookkeeping, redirects
    pymultinest's interrupt handling to this sampler's checkpointing
    routine, invokes ``pymultinest.solve`` and finally harvests the
    equal-weight posterior and evidence into the result object.
    """
    import pymultinest

    self._verify_kwargs_against_default_kwargs()
    self._setup_run_directory()
    self._check_and_load_sampling_time_file()

    # Make an interrupt checkpoint the current state before exiting,
    # instead of using pymultinest's own signal handler.
    pm_run = importlib.import_module("pymultinest.run")
    pm_run.interrupt_handler = self.write_current_state_and_exit

    self.start_time = time.time()
    solve_output = pymultinest.solve(LogLikelihood=self.log_likelihood,
                                     Prior=self.prior_transform,
                                     n_dims=self.ndim,
                                     **self.kwargs)
    self._calculate_and_save_sampling_time()
    self._clean_up_run_directory()

    # Equal-weight posterior written by MultiNest; the last column holds
    # the log-likelihood of each sample.
    eqw_path = os.path.join(self.outputfiles_basename,
                            "post_equal_weights.dat")
    eqw = np.loadtxt(eqw_path)

    self.result.samples = eqw[:, :-1]
    self.result.log_likelihood_evaluations = eqw[:, -1]
    self.result.sampler_output = solve_output
    self.result.log_evidence = solve_output["logZ"]
    self.result.log_evidence_err = solve_output["logZerr"]
    self.calc_likelihood_count()
    self.result.outputfiles_basename = self.outputfiles_basename
    self.result.sampling_time = datetime.timedelta(
        seconds=self.total_sampling_time)
    return self.result
def run_sampler(self):
    """Run MultiNest via ``pymultinest.solve`` and fill ``self.result``.

    Evidence, its error and the equal-weight samples are taken directly
    from the solver's return dictionary.
    """
    import pymultinest

    self._verify_kwargs_against_default_kwargs()
    solve_output = pymultinest.solve(LogLikelihood=self.log_likelihood,
                                     Prior=self.prior_transform,
                                     n_dims=self.ndim,
                                     **self.kwargs)

    self.result.sampler_output = solve_output
    self.result.samples = solve_output['samples']
    self.result.log_evidence = solve_output['logZ']
    self.result.log_evidence_err = solve_output['logZerr']
    self.result.outputfiles_basename = self.kwargs['outputfiles_basename']
    return self.result
def run_sampler(self):
    """Run MultiNest and load the equal-weight posterior into the result.

    MultiNest writes ``post_equal_weights.dat`` to disk; its final column
    is the log-likelihood of each posterior sample.
    """
    import pymultinest

    self._verify_kwargs_against_default_kwargs()
    solver_dict = pymultinest.solve(LogLikelihood=self.log_likelihood,
                                    Prior=self.prior_transform,
                                    n_dims=self.ndim,
                                    **self.kwargs)

    basename = self.kwargs['outputfiles_basename']
    eqw = np.loadtxt(os.path.join(basename, 'post_equal_weights.dat'))

    self.result.sampler_output = solver_dict
    self.result.samples = eqw[:, :-1]
    self.result.log_likelihood_evaluations = eqw[:, -1]
    self.result.log_evidence = solver_dict['logZ']
    self.result.log_evidence_err = solver_dict['logZerr']
    self.result.outputfiles_basename = basename
    return self.result
def __call__(self, **kwargs):
    """ Integrate. """
    # Rescale the sampling efficiency by the fraction of the unit
    # hypercube occupied by the prior; if the caller gave no efficiency,
    # start from 1.0 (equivalent to the old try/except-KeyError branch).
    requested = kwargs.get('sampling_efficiency', 1.0)
    kwargs['sampling_efficiency'] = requested / self._prior.unit_hypercube_frac

    yield 'Sampling efficiency set to: %.4f.' % kwargs['sampling_efficiency']

    # ignore the pymultinest output object
    # it has never been used in the author's workflows but change this
    # it is useful to you
    _ = pymultinest.solve(self._likelihood,
                          self._prior.inverse_sample,
                          self._ndims,
                          log_zero=self._likelihood.llzero,
                          **kwargs)
def __call__(self):
    """
    Runs pyMultinest, writing its output files under ``./chains``.

    Returns
    -------
    results : dict
        pyMultinest sampling results
    """
    # create dir for storing pymultinest output.
    # exist_ok replaces the previous race-prone isdir/mkdir pair, whose
    # swallowed OSError and `assert` (stripped under -O) could hide a
    # genuine failure to create the directory.
    path = os.path.join(os.getcwd(), 'chains')
    os.makedirs(path, exist_ok=True)

    # run pymultinest
    results = pymultinest.solve(LogLikelihood=self._mpi_likelihood,
                                Prior=self.prior,
                                n_dims=len(self._active_parameters),
                                outputfiles_basename='chains/imagine_',
                                **self._sampling_controllers)
    return results
def __call__(self, **kwargs):
    """ Integrate.

    :param kwargs: Keyword arguments passed to :func:`pymultinest.solve`.
    """
    # Default efficiency of 0.8, then rescaled by the prior's share of
    # the unit hypercube.
    base_efficiency = kwargs.setdefault('sampling_efficiency', 0.8)
    kwargs['sampling_efficiency'] = (base_efficiency
                                     / self._prior.unit_hypercube_frac)

    yield 'Sampling efficiency set to: %.4f.' % kwargs['sampling_efficiency']

    # ignore the pymultinest output object
    # it has never been used in the author's workflows but change this
    # it is useful to you
    _ = pymultinest.solve(self._likelihood,
                          self._prior.inverse_sample,
                          self._ndims,
                          log_zero=self._likelihood.llzero,
                          **kwargs)
# delta=0.4
# alpha_1=1
# alpha_2=3
# ph=model_f(t,F_0,m_f,nu_b0,m_b,alpha_1,alpha_2)

LB = [0, 0, 0, 0, 0, 0]  # Lower bound for parameters
UB = [np.inf, 10, np.inf, 10, 10, 10]  # Upper bound for parameters

# All output of this run goes under 'test/'
if not os.path.exists('test'):
    os.makedirs('test')

# pymultinest.run(loglike, prior, n_params, outputfiles_basename='out/'+GRB + '_fit_',
#                 resume = False, verbose = True)
result = pymultinest.solve(LogLikelihood=loglike, Prior=prior,
                           n_dims=n_params,
                           outputfiles_basename='test/' + GRB + '_fit_',
                           resume=False, verbose=True)

# BUG FIX: the parameter-name file previously went to 'out/' (never
# created by this script, so json.dump could crash) while the sampler
# output went to 'test/'; keep them together and close the file handle.
with open('test/' + GRB + '_fit_' + 'params.json', 'w') as f:
    json.dump(parameters, f)

############################Plotting and stuff###################################################
# plot the distribution of a posteriori possible models
# plt.figure()
# plt.scatter(t_data,lc_data)
# # plt.plot(x, ydata, '+ ', color='red', label='data')
# a = pymultinest.Analyzer(outputfiles_basename='out/'+GRB + '_fit_', n_params = n_params)
# for (beta1,beta2,p_F,p_b,F_0,nu_b0) in a.get_equal_weighted_posterior()[::100,:-1]:
#     plt.plot(t_data, model_f(t_data,nu,beta1,beta2,p_F,p_b,F_0,nu_b0), '-', color='blue', alpha=0.3, label='data')
# plt.xscale('log')
# plt.savefig('out/'+GRB + '_fit_posterior.png')
def MN_fit(self, lnev_tol=0.1, n_live_points='auto', sampling_efficiency=0.3,
           INS=True, const_eff=True, basename=None, verbose=False,
           resume=False):
    '''
    Fit with Nested Sampling (MultiNest algorithm).

    For Nested Sampling, the prior and likelihood are not simply combined
    into a posterior (which is the only function passed to EMCEE), but
    they are used differently to explore the probability space and get an
    estimate for the log-evidence.

    Parameters
    ----------
    lnev_tol : float, optional
        Tolerance in the log-evidence. This sets the termination condition
        for the nested sampling algorithm. Default is 0.1.
        (Docstring previously said "bool" and "0.5", contradicting the
        signature.)
    n_live_points : int or str, optional
        Number of live points. If set to 'auto', use a default of
        200 + 25*ndim.
    sampling_efficiency : float, optional
        Sets the enlargement factor for ellipsoidal fits in MultiNest.
        Default is 0.3 (appropriate for model selection).
    INS : bool, optional
        Setting to activate Importance Nested Sampling in MultiNest.
        Refer to [Feroz et al., MNRAS 2014].
    const_eff : bool, optional
        Run MultiNest in constant-efficiency mode. Only appropriate
        together with INS.
    basename : str, optional
        Location where MultiNest output is written.
    verbose : bool, optional
        Boolean indicating whether to output verbose MultiNest progress.
    resume : bool, optional
        Whether to resume a previous MultiNest run found at ``basename``.
    '''
    # obtain maximum likelihood fits
    theta0 = self.lineModel.guessFit()
    self.result_ml = self.optimizeFit(theta0)
    self.theta_ml = self.result_ml['x']

    # save theta_ml also in the lineModel object,
    # so that constraints may be set based on ML result
    self.lineModel.theta_ml = self.theta_ml

    # save moments obtained from maximum likelihood optimization
    self.m_ml = self.lineModel.modelMoments(self.theta_ml)

    # dimensionality of the problem
    ndim = self.lineModel.thetaLength()

    try:
        import pymultinest
    except ImportError as err:
        # Narrowed from a bare `except:` — only a missing pymultinest
        # should abort the fit, not e.g. KeyboardInterrupt.
        print("********************")
        print("Could not import pyMultiNest! "
              "Make sure that it is in your PYTHONPATH.")
        print("MultiNest must also be on your LD_LIBRARY_PATH")
        raise ValueError("Abort BSFC fit") from err

    pymultinest.solve(
        self.lineModel.ns_lnlike,  # log-likelihood
        self.lineModel.hypercube_lnprior_generalized_simplex,  # log-prior
        ndim,
        outputfiles_basename=basename,
        n_live_points=(200 + int(25*ndim) if n_live_points == 'auto'
                       else int(n_live_points)),
        importance_nested_sampling=INS,
        const_efficiency_mode=const_eff,  # only appropriate with INS
        evidence_tolerance=lnev_tol,
        sampling_efficiency=sampling_efficiency,
        n_iter_before_update=1000,  # MultiNest internally multiplies by 10
        max_modes=100,
        mode_tolerance=-1e90,  # keeps all modes
        verbose=verbose,
        resume=resume,
    )

    self.good = True
def call(self, **kwargs):
    """
    Runs the IMAGINE pipeline using the MultiNest sampler

    Returns
    -------
    results : dict
        pyMultinest sampling results in a dictionary containing the keys:
        logZ (the log-evidence), logZerror (the error in log-evidence)
        and samples (equal weighted posterior)
    """
    log.debug('@ multinest_pipeline::__call__')

    # Resets internal state
    self.tidy_up()

    defaults = {'resume': True,
                'n_live_points': 400,
                'evidence_tolerance': 0.5,
                'max_iter': 0,
                'log_zero': -1e100,
                'importance_nested_sampling': True,
                'sampling_efficiency': 0.8,
                'multimodal': True,
                'mode_tolerance': -1e90,
                'null_log_evidence': -1e90,
                'n_clustering_params': None,
                'outputfiles_basename': None,
                'max_modes': 100,
                'n_iter_before_update': 100,
                'verbose': True}

    # Keyword arguments can alter the sampling controllers
    self.sampling_controllers = kwargs  # Updates the dict

    # Sets base name for multinest output files
    self.sampling_controllers['outputfiles_basename'] = path.join(
        self.chains_directory, 'multinest_')

    # Build the solver arguments from the defaults, letting any value
    # present in the sampling controllers win.
    solve_params = {}
    for key, fallback in defaults.items():
        solve_params[key] = self.sampling_controllers.get(key, fallback)

    # Updates the sampling controllers to reflect what is being used
    self.sampling_controllers = solve_params  # Updates the dict

    if not self.sampling_controllers['resume']:
        self.clean_chains_directory()

    # Runs pyMultinest
    log.info('Calling pymultinest.solve')
    self.results = pymultinest.solve(
        LogLikelihood=self._likelihood_function,
        Prior=self.prior_transform,
        n_dims=len(self._active_parameters),
        wrapped_params=None,
        write_output=True,
        seed=self.master_seed,
        **solve_params)

    self._samples_array = self.results['samples']
    self._evidence = self.results['logZ']
    self._evidence_err = self.results['logZerr']

    return self.results
# nu_b0=1e28
# m_b=2
# delta=0.4
# alpha_1=1
# alpha_2=3
# ph=model_f(t,F_0,m_f,nu_b0,m_b,alpha_1,alpha_2)

LB = [0, 0, 0, 0, 0, 0]  # Lower bound for parameters
UB = [np.inf, 10, np.inf, 10, 10, 10]  # Upper bound for parameters

# All output of this run goes under 'out/'
if not os.path.exists('out'):
    os.makedirs('out')

# pymultinest.run(loglike, prior, n_params, outputfiles_basename='out/'+GRB + '_fit_',
#                 resume = False, verbose = True)
result = pymultinest.solve(LogLikelihood=loglike, Prior=prior,
                           n_dims=n_params,
                           outputfiles_basename='out/' + GRB + '_fit_',
                           n_live_points=1000, sampling_efficiency=0.8,
                           resume=True, verbose=True)

# FIX: close the parameter-name file deterministically instead of the
# previous leaked handle from json.dump(parameters, open(...)).
with open('out/' + GRB + '_fit_' + 'params.json', 'w') as f:
    json.dump(parameters, f)

############################Plotting and stuff###################################################
# plot the distribution of a posteriori possible models
# plt.figure()
# plt.scatter(t_data,lc_data)
# # plt.plot(x, ydata, '+ ', color='red', label='data')
# a = pymultinest.Analyzer(outputfiles_basename='out/'+GRB + '_fit_', n_params = n_params)
# for (beta1,beta2,p_F,p_b,F_0,nu_b0) in a.get_equal_weighted_posterior()[::100,:-1]:
#     plt.plot(t_data, model_f(t_data,nu,beta1,beta2,p_F,p_b,F_0,nu_b0), '-', color='blue', alpha=0.3, label='data')
# plt.xscale('log')
# plt.savefig('out/'+GRB + '_fit_posterior.png')
# a_lnZ = a.get_stats()
# ph_model=model_ph(t_data,nu,beta1,beta2,p_F,p_b,F_0,nu_b0)
# likelihood = ((-0.5*(ph_model- ph_data)/ph_err)**2).sum()

def loglike(cube):
    # NOTE(review): only the tail of this function was visible in the
    # mangled source; the statements computing lc_model, lc_data and
    # lc_err from `cube` lie above this chunk — TODO confirm against the
    # original file. The function name is grounded by its use as
    # LogLikelihood=loglike in the solve() call below.
    # Gaussian log-likelihood with per-point errors lc_err.
    loglikelihood = (-0.5*((lc_model - lc_data))**2).sum()
    # NOTE(review): the residual above is not divided by lc_err even
    # though the normalisation term below includes it — possibly a bug
    # in the original; preserved unchanged pending confirmation.
    loglikelihood = loglikelihood - 0.5*sum(np.log(2.*np.pi*lc_err**2))
    # likelihood = exp(-0.5 * ((pos - positions) / width)**2) / (2*pi*width**2)**0.5
    return loglikelihood

dire = "PL"
if not os.path.exists(dire):
    os.makedirs(dire)

# pymultinest.run(loglike, prior, n_params, outputfiles_basename='out/'+GRB + '_fit_',
#                 resume = False, verbose = True)
n_live = 500
tol = 0.3  # NOTE(review): defined but never passed to solve() — confirm intent
result = pymultinest.solve(LogLikelihood=loglike, Prior=prior,
                           n_dims=n_params,
                           outputfiles_basename=dire + '/' + GRB + '_fit_',
                           resume=False, verbose=True,
                           n_live_points=n_live, sampling_efficiency=0.3)

# FIX: close the parameter-name file deterministically instead of the
# previous leaked handle from json.dump(parameters, open(...)).
with open(dire + '/' + GRB + '_fit_' + 'params.json', 'w') as f:
    json.dump(parameters, f)

print()
print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
par_fit = []
for name, col in zip(parameters, result['samples'].transpose()):
    par_fit.append([col.mean(), col.std()])
    print('%15s : %.3e +- %.3e' % (name, col.mean(), col.std()))

with open('GRB %sparams.json' % GRB, 'w') as f:
    json.dump(parameters, f, indent=2)

par_fit = np.array(par_fit)
F_0, t_b, beta_1, beta_2 = par_fit[:, 0]
def loglike(cube):
    """Gaussian log-likelihood of the afterglow model against the data.

    Parameters
    ----------
    cube : array-like
        Physical parameter vector of length ``ndim``; by position these
        are (thetaj, n, p, eB, eE, en) — see the `parameters` list below.

    Returns
    -------
    float
        -chi2/2 minus the Gaussian normalisation term built from `erdata`.
    """
    # Copy the first ndim entries into a plain numpy array (replaces the
    # previous dead `chi2 = 0.0` store and the manual index-copy loop).
    parmlist = np.array([cube[i] for i in range(ndim)])
    chi2 = (((lcflux(tobs, nuobs, parmlist) - fdata) / erdata)**2).sum()
    return -0.5*chi2 - 0.5*np.sum(np.log(2.*np.pi*erdata**2))

# This code both runs PyMultiNest and generates forest plots
# To run PyMultiNest uncomment the below block while commenting the plotting block

#--------------------------running multinest------------------
nlive = par.nlive  # Number of live walkers
tol = 0.3  # NOTE(review): defined but never passed to solve() — confirm intent
parameters = [r'$\theta_{j}$', r'$log n_{0}$', 'p', r'$log \epsilon_{B}$',
              r'$log \epsilon_{E}$', r'$log Ek$']
#parameters=['s',r'$p_{FS}$',r'$\lambda$',r'$\iota$',r'$log\/\nu_m$',r'$log\/\nu_c$',r'$log\/f_m$',r'$log\/\tau_m$']

# solve(loglike, prior, n_dims=ndim, outputfiles_basename=datafile + '_', resume = False, verbose = False,n_live_points=nlive,sampling_efficiency=0.3)
result = pymultinest.solve(loglike, prior, n_dims=ndim,
                           outputfiles_basename=datafile + '_',
                           resume=False, verbose=False,
                           n_live_points=nlive, sampling_efficiency=0.3)

# save parameter names; FIX: close the file deterministically instead of
# the previous leaked handle from json.dump(..., open(...)).
with open(datafile + '_params.json', 'w') as f:
    json.dump(parameters, f)
#-----------------------multinest OVER--------------------------