Code example #1
    def fit_multinest(self, n_live_points=1000, basename="chains/1-", verbose=True, overwrite=True, **kwargs):

        self._mnest_basename = basename

        # creates the directory for the output
        # folder = os.path.abspath(os.path.dirname(self._mnest_basename))
        # if not os.path.exists(self._mnest_basename):
        #    os.makedirs(self._mnest_basename)

        if hasattr(self, "which"):
            self.n_params = 9 + 6 * self.lc.n_planets
        else:
            self.n_params = 5 + 6 * self.lc.n_planets

        pymultinest.run(
            self.mnest_loglike,
            self.mnest_prior,
            self.n_params,
            n_live_points=n_live_points,
            outputfiles_basename=self._mnest_basename,
            verbose=verbose,
            **kwargs
        )

        self._make_samples()
Code example #2
File: __init__.py Project: JohannesBuchner/syscorr
def multinest(parameter_names, transform, loglikelihood, output_basename, **problem):
	parameters = parameter_names
	n_params = len(parameters)
	
	def myprior(cube, ndim, nparams):
		params = transform([cube[i] for i in range(ndim)])
		for i in range(ndim):
			cube[i] = params[i]
	
	def myloglike(cube, ndim, nparams):
		l = loglikelihood([cube[i] for i in range(ndim)])
		return l
	
	# run MultiNest
	mn_args = dict(
		outputfiles_basename = output_basename,
		resume = problem.get('resume', False), 
		verbose = True,
		n_live_points = problem.get('n_live_points', 400))
	if 'seed' in problem:
		mn_args['seed'] = problem['seed']
	pymultinest.run(myloglike, myprior, n_params, **mn_args)

	import json
	# store name of parameters, always useful
	with open('%sparams.json' % output_basename, 'w') as f:
		json.dump(parameters, f, indent=2)
	# analyse
	a = pymultinest.Analyzer(n_params = n_params, 
		outputfiles_basename = output_basename)
	s = a.get_stats()
	with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
		json.dump(s, f, indent=2)
	return a
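
A minimal usage sketch of the wrapper above (illustrative, not part of the original project): the toy transform and loglikelihood below define a two-parameter Gaussian problem, and the chains/ output directory must exist before MultiNest can write to it.

import os

def transform(cube):
    # map the unit hypercube onto the box [-5, 5] x [-5, 5]
    return [10.0 * u - 5.0 for u in cube]

def loglikelihood(params):
    # toy Gaussian log-likelihood centred on the origin
    return -0.5 * sum(p * p for p in params)

os.makedirs('chains', exist_ok=True)  # MultiNest needs the output directory to exist
a = multinest(['a', 'b'], transform, loglikelihood, 'chains/toy-',
              n_live_points=400, seed=1)
print(a.get_stats()['global evidence'])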
Code example #3
def lfit(xx, yy, error, bounds=[-10., 10., -10., 10.]):
    # linear fit with nested sampling

    # prevents seg fault in MultiNest
    error[error == 0] = np.mean(error[error != 0])

    def model_lin(params):
        return params[0] + xx * params[1]

    def myprior_lin(cube, ndim, n_params):
        '''This transforms a unit cube into the dimensions of your prior space.'''
        cube[0] = (bounds[1] - bounds[0]) * cube[0] + bounds[0]
        cube[1] = (bounds[3] - bounds[2]) * cube[1] + bounds[2]

    def myloglike_lin(cube, ndim, n_params):
        loglike = -np.sum(((yy - model_lin(cube)) / error)**2)
        return loglike / 2.

    pymultinest.run(
        myloglike_lin,
        myprior_lin,
        2,
        resume=False,
        sampling_efficiency=0.5,
        evidence_tolerance=0.1,
    )

    # retrieves the data that has been written to hard drive
    a = pymultinest.Analyzer(n_params=2)
    posteriors = a.get_data()
    stats = a.get_stats()
    stats['marginals'] = get_stats(posteriors)

    return stats, posteriors
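
A quick way to exercise lfit on synthetic data (illustrative; note that lfit calls a module-level get_stats() helper not shown in this excerpt, and that pymultinest.run writes to its default 'chains/1-' basename, so a chains/ directory must exist):

import os
import numpy as np

os.makedirs('chains', exist_ok=True)
rng = np.random.default_rng(0)
xx = np.linspace(0.0, 1.0, 50)
error = np.full_like(xx, 0.1)
yy = 0.5 + 2.0 * xx + rng.normal(0.0, 0.1, xx.size)  # true intercept 0.5, slope 2.0
stats, posteriors = lfit(xx, yy, error)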
Code example #4
def test():
	test.prior_was_called = False
	test.loglike_was_called = False
	test.dumper_was_called = False
	
	def myprior(cube, ndim, nparams):
		for i in range(ndim):
			cube[i] = cube[i] * 10 * math.pi
		test.prior_was_called = True

	def myloglike(cube, ndim, nparams):
		chi = 1.
		for i in range(ndim):
			chi *= math.cos(cube[i] / 2.)
		test.loglike_was_called = True
		return math.pow(2. + chi, 5)

	def mydumper(nSamples,nlive,nPar,
			physLive,posterior,paramConstr,
			maxLogLike,logZ,logZerr,nullcontext):
		print("calling dumper")
		test.dumper_was_called = True

	# number of dimensions our problem has
	parameters = ["x", "y"]
	n_params = len(parameters)

	# run MultiNest
	pymultinest.run(myloglike, myprior, n_params,
		resume = True, verbose = True,
		dump_callback=mydumper)
	assert test.prior_was_called
	assert test.loglike_was_called
	assert test.dumper_was_called
Code example #5
    def perform_scan(self, run_tag=None, nlive=100, pymultinest_options=None):
        """ When called creates the directories for and then performs the scan

            run_tag: label of file in which the chains output is stored
            nlive: number of live points to user during the scan. The default
            value of 100 is chosen to speed up testing, but for actual runs
            a larger value is recommended
            pymultinest_options: Custom user inputs passed to multinest; must
            be inserted in the form of a dictionary, as for the default below
        """

        self.run_tag = run_tag
        self.make_dirs_for_run(run_tag)

        if not pymultinest_options:
            # Set defaults
            pymultinest_options = {
                'importance_nested_sampling': False,
                'resume': False,
                'verbose': True,
                'sampling_efficiency': 'model',
                'init_MPI': False,
                'evidence_tolerance': 0.5,
                'const_efficiency_mode': False
            }
        else:
            pass  # Use passed parameters

        # Run MultiNest
        pymultinest.run(self.ll,
                        self.lp,
                        self.n_params,
                        outputfiles_basename=self.chains_dir_for_run,
                        n_live_points=nlive,
                        **pymultinest_options)
Code example #6
def RunMultinest():
    def loglike(cube, ndim, nparams):
        n_signal = events_gen_stat(cube)
        ll = obs * log(n_signal) - n_signal - gammaln(obs + 1)
        return sum(ll)

    save_str = "cenns10_stat"
    out_str = "multinest/" + save_str + "/" + save_str
    json_str = "multinest/" + save_str + "/params.json"

    # Run the sampler with CEvNS, BRN, and SS.
    pymultinest.run(loglike,
                    prior_stat,
                    4,
                    outputfiles_basename=out_str,
                    resume=False,
                    verbose=True,
                    n_live_points=1000,
                    evidence_tolerance=0.5,
                    sampling_efficiency=0.8)

    # Save the parameter names to a JSON file.
    params_stat = [
        "cevns_norm", "ss_norm", "BRN_prompt_norm", "BRN_delayed_norm"
    ]
    json.dump(params_stat, open(json_str, 'w'))
Code example #7
File: fit.py Project: rcoch/bagpipes
    def fit(self, verbose=False, n_live=400):
        """ Fit the specified model to the input galaxy data.

        Parameters
        ----------

        verbose : bool - optional
            Set to True to get progress updates from the sampler.

        n_live : int - optional
            Number of live points: reducing speeds up the code but may
            lead to unreliable results.
        """

        if "lnz" in list(self.results):
            print("Fitting not performed as results have already been" +
                  " loaded from " + self.fname[:-1] + ".h5. To start" +
                  " over delete this file or change run.\n")
            return

        print("\nBagpipes: fitting object " + self.galaxy.ID + "\n")

        start_time = time.time()

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            pmn.run(self.fitted_model.lnlike,
                    self.fitted_model.prior.transform,
                    self.fitted_model.ndim,
                    importance_nested_sampling=False,
                    verbose=verbose,
                    sampling_efficiency="model",
                    n_live_points=n_live,
                    outputfiles_basename=self.fname)

        runtime = time.time() - start_time

        print("\nCompleted in " + str("%.1f" % runtime) + " seconds.\n")

        # Load MultiNest outputs and save basic quantities to file.
        samples2d = np.loadtxt(self.fname + "post_equal_weights.dat")[:, :-1]
        lnz_line = open(self.fname + "stats.dat").readline().split()

        self.results["samples2d"] = samples2d
        self.results["lnz"] = float(lnz_line[-3])
        self.results["lnz_err"] = float(lnz_line[-1])
        self.results["median"] = np.median(samples2d, axis=0)
        self.results["conf_int"] = np.percentile(samples2d, (16, 84), axis=0)

        # Save re-formatted outputs as HDF5 and remove MultiNest output.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            dd.io.save(self.fname[:-1] + ".h5", self.results)

        os.system("rm " + self.fname + "*")

        # Create a posterior object to hold the results of the fit.
        self.posterior = posterior(self.galaxy, run=self.run)

        self._print_results()
Code example #8
def main():
    # Begin timing the estimation process
    start_time = time.time()
    
    # Run the MultiNest software
    pmn.run(binary_logit_log_likelihood, uniform_prior, num_dimensions,
            outputfiles_basename=relative_output_folder,
            n_live_points=num_live_points,
            sampling_efficiency=desired_sampling_efficiency,
            log_zero=-1e200,
            mode_tolerance=-1e180,
            null_log_evidence=-1e180,
            resume=False, verbose=True, init_MPI=False)
            
    # Record the ending time of the estimation process
    end_time = time.time()
    tot_minutes = (end_time - start_time) / 60.0
            
    # Save the parameter names
    with open(relative_output_folder + "parameter_names.json", 'wb') as f:
        json.dump(explanatory_vars, f)
        
    # Save the run settings and the total estimation time
    model_run_params = {"n_live_points": num_live_points,
                        "sampling_efficiency": desired_sampling_efficiency,
                        "estimation_minutes": tot_minutes}
    with open(relative_output_folder + "model_run_parameters.json", "w") as f:
        json.dump(model_run_params, f)
        
    # Print a report on how long the estimation process took
    print "Estimation process took {:.2f} minutes".format(tot_minutes)
Code example #9
    def run(self):

        pymultinest.run(self.mod_file.loglikelihood,
                        self.mod_file.prior,
                        self.mod_file.n_pars,
                        n_clustering_params=None,
                        wrapped_params=None,
                        importance_nested_sampling=True,
                        multimodal=True,
                        const_efficiency_mode=False,
                        n_live_points=self.n_live,
                        evidence_tolerance=self.tolerance,
                        sampling_efficiency=0.8,
                        n_iter_before_update=100,
                        null_log_evidence=-1e+90,
                        max_modes=100,
                        mode_tolerance=-1e+90,
                        outputfiles_basename=self.model_name + "/chains/",
                        seed=-1,
                        verbose=True,
                        resume=self.resume,
                        context=0,
                        write_output=True,
                        log_zero=-1e+100,
                        max_iter=self.max_iterations,
                        init_MPI=False,
                        dump_callback=None)
Code example #10
def RunMultinestNull():
    def loglike(cube, ndim, nparams):
        n_signal = events_gen_stat_null(cube)
        ll = cut_crit*nan_to_num(obs * log(n_signal) - n_signal - gammaln(obs+1)) \
             - (cube[0] - 1)**2 / (2 * ss_error**2) \
             - (cube[1] - 1)**2 / (2 * 0.3**2) \
             - (cube[2] - 1)**2 / (2)
        return sum(nan_to_num(ll))

    save_str = "cenns10_stat_null_llConstraint"
    out_str = "multinest/" + save_str + "/" + save_str
    json_str = "multinest/" + save_str + "/params.json"

    # Run the sampler with just BRN, and SS.
    pymultinest.run(loglike,
                    prior_stat_null,
                    3,
                    outputfiles_basename=out_str,
                    resume=False,
                    verbose=True,
                    n_live_points=1000,
                    evidence_tolerance=0.1,
                    sampling_efficiency=0.8)

    # Save the parameter names to a JSON file.
    params_stat_null = ["ss_norm", "BRN_prompt_norm", "BRN_delayed_norm"]
    json.dump(params_stat_null, open(json_str, 'w'))
Code example #12
def RunMultinestNull():
    def marginalizedLikelihood(cube, ndim, nparams):
        def likelihood(s1, s2, s3):
            signal = events_gen_null([cube[0], cube[1], cube[2], s1, s2, s3])
            return np.prod(pois(
                signal, obs)) * systGaus(s1) * systGaus(s2) * systGaus(s3)

        ll = log(nquad(likelihood, [[0.0, 1.0], [0.0, 1.0], [0.5, 1.0]])[0])
        print(ll)
        return ll

    save_str = "syst_marginalized_null"
    out_str = "multinest/" + save_str + "/" + save_str
    json_str = "multinest/" + save_str + "/params.json"

    # Run the sampler with just BRN, and SS.
    pymultinest.run(marginalizedLikelihood,
                    prior_null,
                    6,
                    outputfiles_basename=out_str,
                    resume=False,
                    verbose=True,
                    n_live_points=2000,
                    evidence_tolerance=0.5,
                    sampling_efficiency=0.8)

    # Save the parameter names to a JSON file.
    params_null = ["ss_norm", "BRN_prompt_norm", "BRN_delayed_norm"]
    json.dump(params_null, open(json_str, 'w'))
Code example #13
    def run(self, outputfiles_basename, resume=False, verbose=True):
        '''
        :param outputfiles_basename:
        :param resume:
        :param verbose:
        :return:
        '''
        if c_lib_link is not None:
            pymultinest.run(self.log_like_c,
                            self.prior,
                            self.n_params,
                            outputfiles_basename=outputfiles_basename,
                            resume=resume,
                            verbose=verbose)
        else:
            pymultinest.run(self.log_like,
                            self.prior,
                            self.n_params,
                            outputfiles_basename=outputfiles_basename,
                            resume=resume,
                            verbose=verbose)
        #a1 = pymultinest.Analyzer(outputfiles_basename=outputfiles_basename, n_params = self.n_params)
        a1 = Analyzer(outputfiles_basename=outputfiles_basename,
                      n_params=self.n_params)
        return a1
Code example #14
File: optimizers.py Project: nmearl/pynamic
def multinest(optimizer, nprocs=1):
    # number of dimensions our problem has
    parameters = ["{0}".format(i)
                  for i in range(len(optimizer.params.get_all(True)))]
    nparams = len(parameters)

    if not os.path.exists('chains'):
        os.mkdir('chains')

    def lnprior(cube, ndim, nparams):
        theta = np.array([cube[i] for i in range(ndim)])

        for i in range(len(optimizer.params.get_all(True))):
            param = optimizer.params.get_all(True)[i]

            if "mass_" in param.name:
                theta[i] = 10 ** (theta[i] * 8 - 9)
            elif "radius_" in param.name:
                theta[i] = 10 ** (theta[i] * 4 - 4)
            elif "flux_" in param.name:
                theta[i] = 10 ** (theta[i] * 4 - 4)
            elif "a_" in param.name:
                theta[i] = 10 ** (theta[i] * 2 - 2)
            elif "e_" in param.name:
                theta[i] = 10 ** (theta[i] * 3 - 3)
            elif "inc_" in param.name:
                theta[i] *= 2.0 * np.pi
            elif "om_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 2 - 2)
            elif "ln_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 8 - 8)
            elif "ma_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 2 - 2)

        for i in range(ndim):
            cube[i] = theta[i]

    def lnlike(cube, ndim, nparams):
        theta = np.array([cube[i] for i in range(ndim)])

        optimizer.params.update(theta)
        mod_flux, mod_rv = optimizer.model(nprocs)

        flnl = -(0.5 * ((mod_flux - optimizer.photo_data[1]) /
                        optimizer.photo_data[2]) ** 2)
        rvlnl = -(0.5 * ((mod_rv - optimizer.rv_data[1]) /
                         optimizer.rv_data[2]) ** 2)
        tlnl = np.sum(flnl) + np.sum(rvlnl)

        nobj = np.append(np.sum(flnl) + np.sum(rvlnl), theta)
        optimizer.chain = np.vstack([optimizer.chain, nobj])

        if tlnl > optimizer.maxlnp:
            optimizer.iterout(tlnl, theta, mod_flux)

        return np.sum(flnl) + np.sum(rvlnl)

    # run MultiNest
    pymultinest.run(lnlike, lnprior, nparams, n_live_points=1000)
Code example #15
File: fit.py Project: skmqft/PriorFlow
def fit_without_background_uncertainty(events_generator,
                                       n_params,
                                       n_bg,
                                       n_obs,
                                       sigma,
                                       prior,
                                       out_put_dir,
                                       resume=False,
                                       verbose=True,
                                       n_live_points=1500,
                                       evidence_tolerance=0.1,
                                       sampling_efficiency=0.3,
                                       **kwargs):
    """
    fitting data using provided loglikelihood,
    assumming no uncertainty on background,
    the default of each parameter ranges from 0 to 1 uniformly,
    use prior to modify the range or distribution of parameters
    :param events_generator: functions to generate predicted number of events
    :param n_params: number of parameters to be fitted
    :param n_bg: background data, 1d array
    :param n_obs: experiment observed data, 1d array
    :param sigma: systemmatic uncertainty on signal
    :param prior: prior for each parameters
    :param out_put_dir: out put directories
    :param resume: multinest parameter, default is False
    :param verbose: multinest parameter, default is True
    :param n_live_points: multinest parameter, default is 1500
    :param evidence_tolerance: multinest parameter, default is 0.1
    :param sampling_efficiency: multinest parameter, default is 0.3
    :param kwargs other parameters for multinest
    """

    # pymultinest passes ndim and nparams to the loglikelihood and prior callbacks; they are unused here
    def lgl(cube, ndim, nparams):
        n_signal = events_generator(cube)
        likelihood = np.zeros(n_obs.shape[0])
        for i in range(n_obs.shape[0]):
            likelihood[i] = quad(
                lambda a: _poisson(n_obs[i], (1 + a) * n_signal[i] + n_bg[i]) *
                _gaussian(a, 0, sigma), -3 * sigma, 3 * sigma)[0]
        prod_like = np.prod(likelihood)
        return np.log(prod_like) if prod_like > 0 else -np.inf

    def prr(cube, ndim, nparams):
        prior(cube)

    pymultinest.run(lgl,
                    prr,
                    n_params,
                    outputfiles_basename=out_put_dir + '_',
                    resume=resume,
                    verbose=verbose,
                    n_live_points=n_live_points,
                    evidence_tolerance=evidence_tolerance,
                    sampling_efficiency=sampling_efficiency,
                    **kwargs)
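
The underscored helpers _poisson and _gaussian are not shown in this excerpt; plausible definitions consistent with how they are used above (hypothetical, for completeness):

from scipy.stats import norm, poisson

def _poisson(k, mu):
    # Poisson probability of observing k counts given expectation mu
    return poisson.pmf(k, mu)

def _gaussian(x, mu, sigma):
    # normal density at x with mean mu and standard deviation sigma
    return norm.pdf(x, loc=mu, scale=sigma)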
Code example #16
File: mcmc.py Project: raphaelshirley/regphot
def multinestevidence(mean,cov,model):
    """
    Input prior over paramters and black box function for evaluating ln liklihood
    based on chi2 from image/model and calcualte evidence for given model
    in preferebly the exact same way to emcee to allow comparison
    """
    prior1 = prior(mean,cov)
    pymultinest.run(lnlike, prior1.pdf, n_params, 
                    resume = True, verbose = True)
Code example #17
    def run_scan(self):
        if os.path.exists(self.chain_path):
            shutil.rmtree(self.chain_path)
        os.makedirs(self.chain_path)
        os.chdir(self.chain_path)

        pymultinest.run(self.likelihood_val, self.prior, 3, **self.pnest_pars)
        os.chdir(path)
        return
Code example #18
def multinestrun(LogLikelihood,my_prior,n_dims,ES,n_params,**kwargs):


    '''
    Author: yuanfang
    Date: 2017.11.17
    Run multinest
    '''
    yy = input('running in yuanfang multinest function...')
    # initialize the cube
    cube = [0.0] * n_params
            
    # def my_prior(cube,ndim,n_params):
    #     ''' 
    #     generate prior for multinest algorithm
    #     return cube whose 1~n_dim is the input parameter
    #     2017.11.19 
    #     '''

    #     for i,name in enumerate(ES.InPar):
    #         if ES.InputPar[name][1].lower() == 'flat':
    #             min = float(ES.InputPar[name][2])
    #             max = float(ES.InputPar[name][3])
    #             cube[i] = random() * (max - min) + min 
    #         elif ES.InputPar[name][1].lower() == 'log':
    #             import math
    #             min = math.log10(float(ES.InputPar[name][2]))
    #             max = math.log10(float(ES.InputPar[name][3]))
    #             cube[i] = 10.0**(random()*(max - min) + min )
    #         else:
    #             sf.ErrorStop( 'Not ready. Only "flat" and "log" prior can be used.' )
            
                    
    import pymultinest
    pymultinest.run(
        LogLikelihood        = LogLikelihood,  
        Prior                = my_prior,
        n_dims               = len(ES.InPar),
        n_params             = n_params,
        seed                 = ES.getRandomSeed(),
        outputfiles_basename = ES.MNOutputFile,
        n_live_points        = kwargs.pop('n_live_points',2*n_dims),
        n_clustering_params  = kwargs.pop('n_clustering_params',2),
        multimodal           = kwargs.pop('multimodal',True),
        const_efficiency_mode= kwargs.pop('const_efficiency_mode', False), # set to false by default 
        evidence_tolerance   = kwargs.pop('evidence_tolerance',1), 
        sampling_efficiency  = kwargs.pop('sampling_efficiency',1),
        n_iter_before_update = kwargs.pop('n_iter_before_update',ES.getPointNum()),
        null_log_evidence    = kwargs.pop('null_log_evidence',-1e+100),
        max_modes            = kwargs.pop('max_modes',5),
        verbose              = kwargs.pop('verbose',True),
        resume               = kwargs.pop('resume',False),  # !!!!!!!!
        context              = kwargs.pop('context',0),
        importance_nested_sampling = kwargs.pop('importance_nested_sampling',True)
    )
                
    json.dump(ES.InPar, open(ES.MNOutputFile+'input.json', 'w')) # save parameter names
Code example #19
    def run_multinest(rperp, sigmag, invcov, splashback, outfile):
        def Prior(cube, ndim, nparams):
            # Sigma Values are from Chang 2018 Table 2. Each sigma is half a prior range
            cube[0] = uniform(-0.92, -0.22, cube[0])  # log(alpha)
            cube[1] = uniform(0.28, 1.28, cube[1])  # log(beta)
            cube[2] = uniform(-0.4, 1.5, cube[2])  # log(gamma)
            cube[3] = uniform(-1.02, -0.62, cube[3])  # r_s
            cube[4] = uniform(-1., 19.,
                              cube[4])  # r_t  #17 is probably better than 19
            cube[5] = uniform(-1.4, -0.9, cube[5])  # rho_0
            cube[6] = uniform(0.8, 2.4, cube[6])  # rho_s # 1.6 \pm 0.8
            cube[7] = uniform(1.17, 1.77, cube[7])  # s_e
            cube[8] = uniform(-0.9, 0., cube[8])  # ln(c_mis)
            cube[9] = uniform(0.11, 0.7, cube[9])  # f_mis

        def Loglike(cube, ndim, nparams):
            # Read in parameters
            log_alpha = cube[0]
            log_beta = cube[1]
            log_gamma = cube[2]
            r_s = cube[3]
            r_t = cube[4]
            rho_0 = cube[5]
            rho_s = cube[6]
            se = cube[7]
            ln_mis = cube[8]
            f_mis = cube[9]
            params = [
                log_alpha, log_beta, log_gamma, r_s, r_t, rho_0, rho_s, se,
                ln_mis, f_mis
            ]

            # Calculate likelihood
            sig_m = Sigmag(rperp, z, params, h0, splashback)
            vec = sig_m - sigmag
            likelihood = -0.5 * np.matmul(np.matmul(vec, invcov), vec.T)

            # Calculate prior
            prior = -0.5 * (-1.13 - ln_mis)**2 / 0.22**2 - 0.5 * (
                log_alpha - np.log10(0.19))**2 / 0.4**2 - 0.5 * (
                    log_beta - np.log10(6.0))**2 / 0.4**2 - 0.5 * (
                        log_gamma - np.log10(4.0))**2 / 0.4**2 - 0.5 * (
                            f_mis - 0.22)**2 / 0.11**2
            #prior = 0.

            # Total probability
            tot = likelihood + prior

            return tot

        # Run Multinest
        mult.run(Loglike,
                 Prior,
                 10,
                 outputfiles_basename=outfile,
                 verbose=False)
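
The uniform() helper used in Prior above is not part of pymultinest and is not shown in this excerpt; a definition consistent with its use here (hypothetical) would be:

def uniform(lo, hi, u):
    # map a unit-interval draw u onto the interval [lo, hi]
    return lo + (hi - lo) * u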
Code example #20
def main():
    cube = [ 0.9, 0.5, 0.1 ] # initial values not used
    ndim = len(cube)
    nparams = len(cube)

    os.chdir('/home/jordi/allst/sample')   

    pm.run(F_calc_Likelihood4stmd, F_allpriors, nparams,
           importance_nested_sampling=False, resume=False, verbose=True,
           n_live_points=32, outputfiles_basename="DMco_",
           sampling_efficiency=0.02, const_efficiency_mode=True, init_MPI=False)
Code example #21
def RunMultinest():
    def marginalizedLikelihood(cube, ndim, nparams):
        def likelihood(s1, s2, s3, s4, s5):
            signal = events_gen(
                [cube[0], cube[1], cube[2], cube[3], s1, s2, s3, s4, s5])
            return np.prod(pois(signal, obs)) * systGaus(s1) * systGaus(
                s2) * systGaus(s3) * systGaus(s4) * systGaus(s5)

        ll = log(
            nquad(likelihood, [[0.0, 1.0], [0.5, 1.0], [0.0, 1.0], [0.0, 1.0],
                               [0.5, 1.0]])[0])
        print(ll)
        return ll

    npoints = 100
    rvs1 = np.random.uniform(0.0, 1.0, npoints)
    rvs2 = np.random.uniform(0.5, 1.0, npoints)
    rvs3 = np.random.uniform(0.0, 1.0, npoints)
    rvs4 = np.random.uniform(0.0, 1.0, npoints)
    rvs5 = np.random.uniform(0.5, 1.0, npoints)
    volume = 0.5**2 / npoints

    def mcLikelihood(cube, ndim, nparams):
        def likelihood(s1, s2, s3, s4, s5):
            signal = events_gen(
                [cube[0], cube[1], cube[2], cube[3], s1, s2, s3, s4, s5])
            return np.sum(log(pois(signal, obs) + 1.0)) + log(
                systGaus(s1) * systGaus(s2) * systGaus(s3) * systGaus(s4) *
                systGaus(s5))

        ll = np.sum([
            likelihood(rvs1[i], rvs2[i], rvs3[i], rvs4[i], rvs5[i])
            for i in range(0, 50)
        ]) * volume
        return ll

    save_str = "syst_marginalized"
    out_str = "multinest/" + save_str + "/" + save_str
    json_str = "multinest/" + save_str + "/params.json"

    # Run the sampler with CEvNS, BRN, and SS.
    pymultinest.run(mcLikelihood,
                    prior,
                    9,
                    outputfiles_basename=out_str,
                    resume=False,
                    verbose=True,
                    n_live_points=2000,
                    evidence_tolerance=0.5,
                    sampling_efficiency=0.8)

    # Save the parameter names to a JSON file.
    params = ["cevns_norm", "ss_norm", "BRN_prompt_norm", "BRN_delayed_norm"]
    json.dump(params, open(json_str, 'w'))
Code example #22
    def run(self, outputfiles_basename, resume=False, verbose=True):

        pymultinest.run(self.log_like,
                        self.prior,
                        self.n_params,
                        outputfiles_basename=outputfiles_basename,
                        resume=resume,
                        verbose=verbose)
        a1 = Analyzer(outputfiles_basename=outputfiles_basename,
                      n_params=self.n_params)
        data = [self.energyc, self.lag, self.lag_errl, self.lag_errh]
        return Fit_plot(a1, self.parameters, data, self.model)
Code example #23
File: zjh_data_analysis.py Project: njugrid/my_work
def my_curvfit(function,
               prior,
               parameters,
               x,
               y,
               yerr1,
               yerr2=None,
               savedir=None,
               done=True):

    n_params = len(parameters)
    if yerr2 is not None:
        yerr2 = np.abs(yerr2)
    x = np.array(x)
    y = np.array(y)
    yerr1 = np.abs(yerr1)

    if savedir is None:
        savedir = '/home/laojin/software/my_python/curvfit/'
    if not os.path.exists(savedir):
        os.makedirs(savedir)

    def loglike(cube, ndim, nparams):

        ymodel = function(x, cube)
        if yerr2 is not None:
            err = []
            for index, value in enumerate(ymodel - y):
                if value > 0:
                    err.append(yerr2[index])
                else:
                    err.append(yerr1[index])
            err = np.array(err)
        else:
            err = yerr1

        loglikelihood = (-0.5 * ((ymodel - y) / err)**2).sum()
        return loglikelihood

    if not os.path.exists(savedir + 'spectrum_params.json') or done:
        pymultinest.run(loglike,
                        prior,
                        n_params,
                        outputfiles_basename=savedir + 'spectrum_',
                        resume=False,
                        verbose=True)
        json.dump(parameters, open(savedir + 'spectrum_params.json', 'w'))
    a = pymultinest.Analyzer(outputfiles_basename=savedir + 'spectrum_',
                             n_params=n_params)
    return a
Code example #24
File: fit.py Project: skmqft/PriorFlow
    def __call__(self, generator, prior, nparams):
        def lgl(cube, ndim, nparams):
            n_pred = generator(cube)
            return self.loglike(self.n_obs, self.n_bg, n_pred)

        def prr(cube, ndim, nparams):
            prior(cube)

        pymultinest.run(lgl,
                        prr,
                        nparams,
                        outputfiles_basename=self.output_dir + '_',
                        **self.kwargs)
Code example #25
File: mcmc_lik.py Project: drdangersimon/mcmc_phylo
def run_multinest(posterior, save_file):
    """Uses multinest sampler to calculate evidence instead of emceee
    cmd is bash command to call lagrange_cpp
    posterior is posterior class, should have methods prior and lik
    save_file is path to save. will resume from file if arround"""
    # checks
    # if path exsissts
    if not os.path.exists(save_file) and mpi.COMM_WORLD.rank == 0:
        os.mkdir(save_file)
    assert hasattr(posterior, 'prior') and hasattr(posterior, 'lik'), 'must have prior and lik methods'
    # run sampler
    pymultinest.run(posterior.lik, posterior.prior, posterior.get_dim(),
                    outputfiles_basename=save_file)
Code example #26
    def sample(self):

        self.pars = np.zeros(self.nParams)

        # run MultiNest
        pymultinest.run(self.logLike,
                        self.prior,
                        self.nParams,
                        importance_nested_sampling=False,
                        resume=False,
                        verbose=True,
                        sampling_efficiency='parameter',
                        n_live_points=100)
Code example #27
File: multinest.py Project: nmearl/pynamic-old
def generate(lmod_pars, lparams, lphoto_data, lrv_data, lncores, lfname):
    global mod_pars, params, photo_data, rv_data, ncores, fname
    mod_pars, params, photo_data, rv_data, ncores, fname = \
        lmod_pars, lparams, lphoto_data, lrv_data, lncores, lfname

    # number of dimensions our problem has
    parameters = ["{0}".format(i) for i in range(mod_pars[0] * 5 + (mod_pars[0] - 1) * 6)]
    nparams = len(parameters)

    # make sure the output directories exist
    if not os.path.exists("./output/{0}/multinest".format(fname)):
        os.makedirs(os.path.join("./", "output", "{0}".format(fname), "multinest"))

    if not os.path.exists("./output/{0}/plots".format(fname)):
        os.makedirs(os.path.join("./", "output", "{0}".format(fname), "plots"))

    if not os.path.exists("chains"): os.makedirs("chains")
    # we want to see some output while it is running
    progress_plot = pymultinest.ProgressPlotter(n_params=nparams,
                                                outputfiles_basename='output/{0}/multinest/'.format(fname))
    progress_plot.start()
    # progress_print = pymultinest.ProgressPrinter(n_params=nparams, outputfiles_basename='output/{0}/multinest/'.format(fname))
    # progress_print.start()

    # run MultiNest
    pymultinest.run(lnlike, lnprior, nparams, outputfiles_basename=u'./output/{0}/multinest/'.format(fname),
                    resume=True, verbose=True,
                    sampling_efficiency='parameter', n_live_points=1000)

    # run has completed
    progress_plot.stop()
    # progress_print.stop()
    json.dump(parameters, open('./output/{0}/multinest/params.json'.format(fname), 'w'))  # save parameter names

    # plot the distribution of a posteriori possible models
    plt.figure()
    plt.plot(photo_data[0], photo_data[1], '+', color='red', label='data')

    a = pymultinest.Analyzer(outputfiles_basename="./output/{0}/multinest/".format(fname), n_params=nparams)

    for theta in a.get_equal_weighted_posterior()[::100, :-1]:
        params = utilfuncs.split_parameters(theta, mod_pars[0])

        mod_flux, mod_rv = utilfuncs.model(mod_pars, params, photo_data[0], rv_data[0])

        plt.plot(photo_data[0], mod_flux, '-', color='blue', alpha=0.3, label='data')

    utilfuncs.report_as_input(params, fname)

    plt.savefig('./output/{0}/plots/posterior.pdf'.format(fname))
    plt.close()
Code example #28
File: run.py Project: bsafdi/GCE-2FIG
    def perform_scan_multinest(self, chains_dir, nlive=100):
        """ Perform a scan with MultiNest
        """
        self.make_dirs([chains_dir])
        n_params = len(self.floated_params)
        pymultinest_options = {'importance_nested_sampling': False,
                                'resume': False, 'verbose': True,
                                'sampling_efficiency': 'model',
                                'init_MPI': False, 'evidence_tolerance': 0.5,
                                'const_efficiency_mode': False}

        pymultinest.run(self.ll, self.prior_cube, n_params, 
                        outputfiles_basename=chains_dir, 
                        n_live_points=nlive, **pymultinest_options)
Code example #29
	def run_Multinest(self,Nthreads,output_filename='temporary_model'):
		'''
		This function runs Multinest using the self.my_prior priors, which need to be set appropriately as Multinest-style priors.

		Parameters
		Nthreads: int
			Number of threads per Multinest instance used by Galario to compute the model visibilities
		output_filename: string
			Multinest output base file name. See the pymultinest docs for how to read and analyze the outputs.

		'''
		n_params = 10
		from galario import double
		double.threads(Nthreads)
		pymultinest.run(self.loglike_multinest, self.my_prior, n_params,
		                outputfiles_basename=output_filename, resume=True,
		                verbose=True, sampling_efficiency='model')
Code example #30
    def _fit(self,
             model: AbstractPriorModel,
             analysis,
             log_likelihood_cap=None) -> res.Result:
        """
        Fit a model using MultiNest and the Analysis class which contains the data and returns the log likelihood from
        instances of the model, which the `NonLinearSearch` seeks to maximize.

        Parameters
        ----------
        model : ModelMapper
            The model which generates instances for different points in parameter space.
        analysis : Analysis
            Contains the data and the log likelihood function which fits an instance of the model to the data, returning
            the log likelihood the `NonLinearSearch` maximizes.

        Returns
        -------
        A result object comprising the Samples object that includes the maximum log likelihood instance and the full
        set of accepted samples of the fit.
        """

        # noinspection PyUnusedLocal
        def prior(cube, ndim, nparams):
            # NEVER EVER REFACTOR THIS LINE! Haha.

            phys_cube = model.vector_from_unit_vector(unit_vector=cube)

            for i in range(len(phys_cube)):
                cube[i] = phys_cube[i]

            return cube

        fitness_function = self.fitness_function_from_model_and_analysis(
            model=model, analysis=analysis)

        import pymultinest

        logger.info("Beginning MultiNest non-linear search. ")

        pymultinest.run(fitness_function,
                        prior,
                        model.prior_count,
                        outputfiles_basename="{}/multinest".format(
                            self.paths.path),
                        verbose=not self.silence,
                        **self.config_dict_search)
        self.copy_from_sym()
Code example #31
def test():
    def myprior(cube, ndim, nparams):
        for i in range(ndim):
            cube[i] = cube[i] * 10 * math.pi

    def myloglike(cube, ndim, nparams):
        chi = 1.
        for i in range(ndim):
            chi *= math.cos(cube[i] / 2.)
        return math.pow(2. + chi, 5)

    # number of dimensions our problem has
    parameters = ["x", "y"]
    n_params = len(parameters)

    # run MultiNest
    pymultinest.run(myloglike, myprior, n_params, resume=True, verbose=True)
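
After a run like the one above, the files written under the default basename chains/1- can be inspected with the Analyzer; a minimal sketch, assuming the run has completed:

a = pymultinest.Analyzer(n_params=n_params, outputfiles_basename='chains/1-')
s = a.get_stats()
print('ln Z = %.2f +/- %.2f' % (s['global evidence'], s['global evidence error']))
samples = a.get_equal_weighted_posterior()[:, :-1]  # drop the log-likelihood column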
Code example #32
	def sample(self, nlive=500, ceff=False, efr=0.2, resume=False, doplot=False, sample=True):
		'''
		Function to begin sampling with the model defined in the Candidate File.

		nlive - number of live points for multinest (default = 500)
		ceff - use constant efficiency mode (default = False)
		efr - efficiency rate (default = 0.2)
		resume - resume sampling if restarted (default = False)
		doplot - make plots after sampling (default = False)
		sample - run the sampler; if False, only load existing chains (default = True)
		'''

		if(sample == True):
			pymultinest.run(self.GaussGPULikeWrap, self.MNprior, self.Cand.n_dims,
				n_params=self.Cand.n_params, importance_nested_sampling=False,
				resume=resume, verbose=True, sampling_efficiency=efr,
				multimodal=False, const_efficiency_mode=ceff,
				n_live_points=nlive, outputfiles_basename=self.ChainRoot,
				wrapped_params=self.Cand.wrapped)

		self.loadChains()
		if(doplot == True):
			self.plotResult()
Code example #33
File: hi_multinest.py Project: ska-sa/hibayes
def main():
    """
    """

    # Set up MPI variables
    world=MPI.COMM_WORLD
    rank=world.rank
    size=world.size
    master = rank==0

    if master:
        print "Runtime parameters"
        pprint.pprint(rp)
        time.sleep(2)

        if not os.path.exists(rp["outdir"]):
            try:
                os.mkdir(rp["outdir"])
            except:
                pass

    n_params = rp["nc_fit"] + 3

    #progress = pymultinest.ProgressPlotter(n_params=n_params,  interval_ms=10000,
    #                                       outputfiles_basename=rp["outputfiles_basename"])
    #progress.start()
    
    pymultinest.run(loglike, logprior, n_params, resume=False, verbose=True,
                    multimodal=rp["multimodal"], max_modes=rp["max_modes"], write_output=True,
                    n_live_points=rp["n_live_points"],
                    evidence_tolerance=rp["evidence_tolerance"],
                    mode_tolerance=rp["mode_tolerance"],
                    seed=rp["seed"],
                    max_iter=rp["max_iter"],
                    importance_nested_sampling=rp["do_ins"],
                    outputfiles_basename=rp["outputfiles_basename"],\
                    init_MPI=False)

    if master:
        # Copy the config.ini file to the output dir
        shutil.copy(param_file,rp["outdir"])

    #progress.stop()

    return 0
Code example #35
def main():
    """
    """

    if not os.path.exists(outdir): DIR(outdir)

    n_params = 6
    pymultinest.run(myloglike,myprior,n_params,resume=False,verbose=True,\
                multimodal=multimodal,max_modes=max_modes,write_output=True,\
                n_live_points=n_live_points,\
                evidence_tolerance=evidence_tolerance,\
                mode_tolerance=-1e90,seed=seed,max_iter=max_iter,\
                importance_nested_sampling=do_INS,\
                outputfiles_basename=os.path.join(outdir,outstem),init_MPI=False)

    #contour_plot.contourTri(pylab.loadtxt('chains_test/1-post_equal_weights.dat'),line=True,outfile='chains_test/test.png',col=('red','blue'),labels=parameters,binsize=50,truth=plotTruth,ranges=plotRanges,autoscale=True)

    return 0
Code example #37
File: fitting.py Project: kflana1/fabry_perot
def find_maximum_3(x, y, y_sd, basename, plotit=False):
    """
    guass fit using multinest
    """

    A_lim = [0.8 * y.max(), 1.2 * y.max()]
    X0_lim = [x[0], x[-1]]
    S_lim = [0.5 * (x[-1]**2 - x[0]**2), 3 * (x[-1]**2 - x[0]**2)]

    def Prior(cube, ndim, nparams):
        cube[0] = cube[0] * (A_lim[1] - A_lim[0]) + A_lim[0]
        cube[1] = cube[1] * (X0_lim[1] - X0_lim[0]) + X0_lim[0]
        cube[2] = cube[2] * (S_lim[1] - S_lim[0]) + S_lim[0]

    def LogLikelihood(cube, ndim, nparams):
        fit = _gauss(cube, x)
        chisq = np.sum((fit - y)**2 / y_sd**2)
        return -chisq / 2.0

    nparams = 3
    pymultinest.run(LogLikelihood,
                    Prior,
                    nparams,
                    resume=False,
                    outputfiles_basename=basename)

    result = pymultinest.analyse.Analyzer(nparams,
                                          outputfiles_basename=basename)

    samples = result.get_equal_weighted_posterior()

    pks = samples[:, 1]
    pk = np.mean(pks)
    pk_err = np.std(pks)

    if plotit:
        f, ax = plt.subplots(1, 2)
        ax[0].errorbar(x, y, yerr=y_sd, fmt='.', color='C0')
        ax[0].axvspan(pk - pk_err, pk + pk_err, color='C1', alpha=0.3)
        my_hist(ax[1], pks)
        plt.show()

    return pk, pk_err
Code example #38
def pelim(infile,doopt,spar,sopt,gpar,gopt,gpix, psfpar,\
         crval=[0.,0.],cdelt=[1.,1.],crpix=[0.,0.],noise=1.0, comps=None):
    if len(np.ravel(spar))==12:
        snames = np.array(['SX1','SY1','SF1','SW1','SE1','SPA1']) #,'SX2','SY2','SF2','SW2','SE2','SPA2'
    else: 
        snames = np.array(['SX1','SY1','SF1','SW1','SE1','SPA1'])
    gnames = ['GX','GY','GB','GE','GPA','GSM','GSA']
    global b
    b=getdata(infile)
    
    b=b[0][0] if b.ndim==4 else b
    
    global lin, olin, x0
    
    lin = np.append(np.ravel(spar),np.ravel(gpar))
    olin = np.append(np.ravel(sopt),np.ravel(gopt))
    x0 = lin[olin]
    
    global parameters
    both_names = np.append(np.ravel(snames),np.ravel(gnames))
    parameters = both_names[olin]

    global last_retval
    last_retval = None
    if doopt:
        args=(spar,sopt,gpar,gopt,gpix, psfpar,crval,cdelt,crpix,\
                      b,False,noise,False, comps, lin)
        n_params = len(parameters)
        print('n_params:', n_params)
        datafile = 'out/1115'
        print(parameters)
        # run MultiNest
        pymultinest.run(loglike, prior, n_params, outputfiles_basename=datafile + '_1_', resume = False, verbose = True)
        json.dump(parameters.tolist(), open(datafile + '_1_params.json', 'w')) # save parameter names
        
        #xopt= lbfgs(optim_func, x0, args=args, approx_grad = 1)
        #xopt= fmin(optim_func, x0, args=args,     maxfun=10000)

    else:   
        optim_func(x0,spar,sopt,gpar,gopt,gpix, psfpar,crval,cdelt,crpix,\
                      b,False,noise,True, comps, lin)
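    # NOTE: xopt is only assigned by the commented-out lbfgs/fmin calls above,
    # so the doopt branch of the line below requires re-enabling one of them.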
    ospar,ogpar = replace_x0 (spar,gpar,sopt,gopt,xopt if doopt else x0)
Code example #39
def run_nested(spec, model, basename='run/test_run'):
    pymultinest.run(
        model.loglikelihood,
        model.prior_transform,
        model.n_params,
        outputfiles_basename=basename,
        resume=False,
        verbose=True,
        evidence_tolerance=0.3,
        n_live_points=400,
        sampling_efficiency=0.3,
        n_iter_before_update=2000,
    )
    analyzer = pymultinest.Analyzer(
        outputfiles_basename=basename,
        n_params=model.n_params,
    )
    lnZ = analyzer.get_stats()['global evidence']
    print(':: Evidence Z:', lnZ / np.log(10))
    return analyzer
Code example #40
File: jointLikelihood.py Project: cdr397/3ML
 def multinest(self,*args,**kwargs):
   import pymultinest
   
   #res                       = self.fit(False,True)
   
   #f                         = open("calls.txt","w+")
   
   self.freeParameters       = self.modelManager.getFreeParameters()
       
   def prior(cube, ndim, nparams):
     for i,p in enumerate(self.freeParameters.values()):
       cube[i]               = p.prior.multinestCall(cube[i]) 
     pass
   pass
       
   def loglike(cube, ndim, nparams):
     logL                    = self.minusLogLike(cube)*(-1)
     if(numpy.isnan(logL)):
       logL                  = -1e10
     #f.write(" ".join(map(lambda x:"%s" %x,cube[:ndim])))
     #f.write(" %s\n" % logL)
     return logL
   pass
   
   if('verbose' not in kwargs):
     kwargs['verbose']       = True
   if('resume' not in kwargs):
     kwargs['resume']        = False
   if('outputfiles_basename' not in kwargs):
     kwargs['outputfiles_basename'] = '_1_'
   pass
   kwargs['log_zero']        = -1e9
   pymultinest.run(loglike, prior, len(self.freeParameters), *args, **kwargs)
   print("done")
   
   #Collect the samples
   analyzer                   = pymultinest.Analyzer(n_params=len(self.freeParameters),outputfiles_basename=kwargs['outputfiles_basename'])
   
   eqw                        = analyzer.get_equal_weighted_posterior()
   self.samples               = eqw[:,:-1]
   self.posteriors            = eqw[:,-1]
Code example #41
File: gravimage.py Project: PascalSteger/darcoda
def run(gp):
    pymultinest.run(myloglike, myprior, gp.ndim, n_params = gp.ndim+1,
                    n_clustering_params = gp.nrho, # gp.ndim, or separate modes on the rho parameters only: gp.nrho
                    wrapped_params = [ gp.pops, gp.nipol, gp.nrho],
                    importance_nested_sampling = False, # INS disabled
                    multimodal = False,           # separate modes
                    const_efficiency_mode = True, # use const sampling efficiency
                    n_live_points = gp.nlive,
                    evidence_tolerance = 0.0,   # 0 to keep algorithm working indefinitely
                    sampling_efficiency = 0.05, # 0.05, MultiNest README for >30 params
                    n_iter_before_update = 2,  # output after this many iterations
                    null_log_evidence = -1e100,
                    max_modes = gp.nlive, # preallocation of modes: max=number of live points
                    mode_tolerance = -1.e100,   # mode tolerance in the case where no special value exists: highly negative
                    outputfiles_basename = gp.files.outdir,
                    seed = -1, verbose = True,
                    resume = gp.restart,
                    context = 0, write_output = True,
                    log_zero = -1e500, # points with log likelihood<log_zero will be neglected
                    max_iter = 0, # set to 0 for never reaching max_iter (no stopping criterium based on number of iterations)
                    init_MPI = False, dump_callback = None)
Code example #42
File: gravlite.py Project: sofiasi/darcoda
def run():
    import gl_file   as gfile
    if gp.getnewdata:
        gfile.bin_data()
    gfile.get_data()
    
    ## number of dimensions
    n_dims = gp.nepol + gp.pops*gp.nepol + gp.pops*gp.nbeta #rho, (nu, beta)_i
    parameters = stringlist(gp.pops, gp.nepol)
    
    # show live progress
    # progress = pymultinest.ProgressPlotter(n_params = n_dims)
    # progress.start()
    # threading.Timer(2, show, [gp.files.outdir+'/phys_live.points.pdf']).start() 

    # print(str(len(gp.files.outdir))+': len of gp.files.outdir')
    pymultinest.run(myloglike,   myprior,
                    n_dims,      n_params = n_dims, # None beforehands
                    n_clustering_params = gp.nepol, # separate modes on the rho parameters only
                    wrapped_params = None,          # do not wrap-around parameters
                    importance_nested_sampling = True, # INS enabled
                    multimodal = True,  # separate modes
                    const_efficiency_mode = True, # use const sampling efficiency
                    n_live_points = gp.nlive,
                    evidence_tolerance = 0.0, # set to 0 to keep algorithm working indefinitely
                    sampling_efficiency = 0.80,
                    n_iter_before_update = gp.nlive, # output after this many iterations
                    null_log_evidence = -1, # separate modes if logevidence > this param.
                    max_modes = gp.nlive,   # preallocation of modes: maximum = number of live points
                    mode_tolerance = -1.,
                    outputfiles_basename = gp.files.outdir,
                    seed = -1,
                    verbose = True,
                    resume = False,
                    context = 0,
                    write_output = True,
                    log_zero = -1e6,
                    max_iter = 10000000,
                    init_MPI = True,
                    dump_callback = None)
Code example #43
File: base.py Project: specgrid/starkit
    def run(self, clean_up=None, **kwargs):

        if clean_up is None:
            if self.run_dir is None:
                clean_up = True
            else:
                clean_up = False

        if self.run_dir is None:
            run_dir = tempfile.mkdtemp()
        else:
            run_dir = self.run_dir

        basename = self.prepare_fit_directory(run_dir, self.prefix)

        start_time = time.time()

        logger.info('Starting fit in {0} with prefix {1}'.format(run_dir, self.prefix))
        pymultinest.run(self.likelihood.multinest_evaluate, self.priors.prior_transform,
                        self.n_params,
                        outputfiles_basename='{0}_'.format(basename),
                        **kwargs)

        logger.info("Fit finished - took {0:.2f} s"
                    .format(time.time() - start_time))
        fitted_parameter_names = [item for item in self.likelihood.param_names
                                  if not self.likelihood.fixed[item]]

        self.result = MultiNestResult.from_multinest_basename(
            basename, fitted_parameter_names)

        if clean_up:
            logger.info("Cleaning up - deleting {0}".format(run_dir))
            shutil.rmtree(run_dir)
        else:
            logger.info("Multinest files can be found in {0}".format(run_dir))

        self.likelihood.parameters[~self.likelihood.fixed_mask()] = (
            self.result.median.values)
        return self.result
Code example #44
File: multinest_fitter.py Project: specgrid/specgrid
    def run(self, clean_up=None, **kwargs):

        if clean_up is None:
            if self.run_dir is None:
                clean_up = True
            else:
                clean_up = False


        if self.run_dir is None:
            run_dir = tempfile.mkdtemp()
        else:
            run_dir = self.run_dir

        basename = self.prepare_fit_directory(run_dir, self.prefix)


        start_time = time.time()

        logger.info('Starting fit in {0} with prefix {1}'.format(run_dir, self.prefix))
        pymultinest.run(self.likelihood, self.priors.prior_transform,
                        self.n_params,
                        outputfiles_basename='{0}_'.format(basename),
                        **kwargs)

        logger.info("Fit finished - took {0:.2f} s"
                    .format(time.time() - start_time))

        self.result = MultinestResult.from_multinest_basename(
            basename, self.likelihood.param_names)

        if clean_up == True:
            logger.info("Cleaning up - deleting {0}".format(run_dir))
            shutil.rmtree(run_dir)

        return self.result
Code example #45
		try:
			loglike = kg_loglikelihood(cube,my_observable,my_error,a)
			return loglike
		except:
			return -np.inf 

	parameters = ['Separation','Position Angle','Contrast']
	n_params = len(parameters)

	resume = False
	eff = 0.3
	multi = True
	max_iter = 0
	ndim = n_params

	pymultinest.run(myloglike_kg, myprior, n_params, wrapped_params=[1],
		verbose=True,resume=False)

	thing = pymultinest.Analyzer(n_params = n_params)
	s = thing.get_stats()

	this_j = trial

	kseps[this_j], dkseps[this_j] = s['marginals'][0]['median'], s['marginals'][0]['sigma']
	kthetas[this_j], dkthetas[this_j] = s['marginals'][1]['median'], s['marginals'][1]['sigma']
	kcons[this_j], dkcons[this_j] = s['marginals'][2]['median'], s['marginals'][2]['sigma']
	
	stuff = thing.get_best_fit()
	best_params = stuff['parameters']
	model_vises = np.sqrt(pysco.binary_model(best_params,a,hdr,vis2=True))
	model_kervises = np.dot(KerGain,model_vises-1.)
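The read-back pattern above — construct an Analyzer, call get_stats(), and pull 'median' and 'sigma' out of each entry of s['marginals'] — recurs throughout these examples. A minimal self-contained sketch of the same idea (the helper name and default basename are illustrative):

import pymultinest

def marginal_summary(n_params, basename='chains/1-'):
    # Gather (median, sigma) for every fitted parameter from the
    # output files MultiNest wrote under `basename`.
    analyzer = pymultinest.Analyzer(n_params=n_params,
                                    outputfiles_basename=basename)
    stats = analyzer.get_stats()
    return [(m['median'], m['sigma']) for m in stats['marginals']]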
Code example #46
File: trades_pymnest.py  Project: lucaborsato/trades
def main():
  # MAIN -- TRADES + pyMultiNest
  # ---
  # initialize logger
  logger = logging.getLogger("Main_log")
  logger.setLevel(logging.DEBUG)
  formatter = logging.Formatter("%(asctime)s - %(message)s")

  # READ COMMAND LINE ARGUMENTS
  cli = get_args()

  # STARTING TIME
  start = time.time()

  # RENAME 
  working_path = cli.full_path
  #nthreads=cli.nthreads

  log_file = os.path.join(working_path, '%s_log.txt' %(os.path.dirname(cli.sub_folder)))

  flog = logging.FileHandler(log_file, 'w')
  flog.setLevel(logging.DEBUG)
  flog.setFormatter(formatter)
  logger.addHandler(flog)
  # log screen
  slog = logging.StreamHandler()
  slog.setLevel(logging.DEBUG)
  slog.setFormatter(formatter)
  logger.addHandler(slog)


  fitting_priors, fitting_priors_type = read_priors(os.path.join(working_path, 'fitting_priors.dat'))
  derived_priors, derived_priors_type = read_priors(os.path.join(working_path, 'derived_priors.dat'))
  n_der_priors = len(derived_priors)


  # INITIALISE TRADES WITH SUBROUTINE WITHIN TRADES_LIB -> PARAMETER NAMES, MINMAX, INTEGRATION ARGS, READ DATA ...
  pytrades_lib.pytrades.initialize_trades(working_path, cli.sub_folder, 1)

  # RETRIEVE DATA AND VARIABLES FROM TRADES_LIB MODULE
  fitting_parameters = pytrades_lib.pytrades.fitting_parameters # INITIAL PARAMETER SET (NEEDED ONLY TO HAVE THE PROPER ARRAY/VECTOR)
  parameters_minmax = pytrades_lib.pytrades.parameters_minmax # PARAMETER BOUNDARIES
  delta_parameters = np.abs(parameters_minmax[:,1] - parameters_minmax[:,0]) # DELTA BETWEEN MAX AND MIN OF BOUNDARIES

  n_bodies = pytrades_lib.pytrades.n_bodies # NUMBER OF TOTAL BODIES OF THE SYSTEM
  n_planets = n_bodies - 1 # NUMBER OF PLANETS IN THE SYSTEM
  ndata = pytrades_lib.pytrades.ndata # TOTAL NUMBER OF DATA AVAILABLE
  npar  = pytrades_lib.pytrades.npar # NUMBER OF TOTAL PARAMETERS ~n_planets X 6
  nfit  = pytrades_lib.pytrades.nfit # NUMBER OF PARAMETERS TO FIT
  nfree  = pytrades_lib.pytrades.nfree # NUMBER OF FREE PARAMETERS (ie nrvset)
  dof   = pytrades_lib.pytrades.dof # NUMBER OF DEGREES OF FREEDOM = NDATA - NFIT
  global inv_dof
  #inv_dof = np.float64(1.0 / dof)
  inv_dof = pytrades_lib.pytrades.inv_dof

  str_len = pytrades_lib.pytrades.str_len
  temp_names = pytrades_lib.pytrades.get_parameter_names(nfit,str_len)
  trades_names = anc.convert_fortran_charray2python_strararray(temp_names)
  parameter_names = anc.trades_names_to_emcee(trades_names)

  # RADIAL VELOCITIES SET
  n_rv = pytrades_lib.pytrades.nrv
  n_set_rv = pytrades_lib.pytrades.nrvset

  # TRANSITS SET
  n_t0 = pytrades_lib.pytrades.nt0
  n_t0_sum = pytrades_lib.pytrades.ntts
  n_set_t0 = 0
  for i in range(0, n_bodies-1):
    if (n_t0[i] > 0): n_set_t0 += 1


  # compute global constant for the loglhd
  global ln_err_const

  #try:
    #e_RVo = np.asarray(pytrades_lib.pytrades.ervobs[:], dtype=np.float64) # fortran variable RV in python will be rv!!!
  #except:
    #e_RVo = np.asarray([0.], dtype=np.float64)
  #try:
    #e_T0o = np.asarray(pytrades_lib.pytrades.et0obs[:,:], dtype=np.float64).reshape((-1))
  #except:
    #e_T0o = np.asarray([0.], dtype=np.float64)
  #ln_err_const = anc.compute_ln_err_const(ndata, dof, e_RVo, e_T0o, cli.ln_flag)
  ln_err_const = pytrades_lib.pytrades.ln_err_const
  
  # READ THE NAMES OF THE PARAMETERS FROM THE TRADES_LIB AND CONVERT IT TO PYTHON STRINGS
  #reshaped_names = pytrades_lib.pytrades.parameter_names.reshape((10,nfit), order='F').T
  #parameter_names = [''.join(reshaped_names[i,:]).strip() for i in range(0,nfit)]

  # INITIALISE SCRIPT FOLDER/LOG FILE
  working_folder, run_log, of_run = init_folder(working_path, cli.sub_folder)

  logger.info('')
  logger.info('==================== ')
  logger.info('pyTRADES-pyMultiNest')
  logger.info('==================== ')
  logger.info('')
  logger.info('WORKING PATH = %s' %(working_path))
  logger.info('dof = ndata(%d) - nfit(%d) = %d' %(ndata, nfit, dof))
  logger.info('Total N_RV = %d for %d set(s)' %(n_rv, n_set_rv))
  logger.info('Total N_T0 = %d for %d out of %d planet(s)' %(n_t0_sum, n_set_t0, n_planets))
  logger.info('log constant error = %.7f' %(ln_err_const))

  #of_run.write(' dof = ndata(%d) - nfit(%d) = %d\n' %(ndata, nfit, dof))
  #of_run.write(' Total N_RV = %d for %d set(s)\n' %(n_rv, n_set_rv))
  #of_run.write(' Total N_T0 = %d for %d out of %d planet(s)\n' %(n_t0_sum, n_set_t0, n_planets))
  #of_run.write(' %s = %.7f\n' %('log constant error = ', ln_err_const))

  # save parameter_names and boundaries to be read by a script
  trades_hdf5 = h5py.File(os.path.join(working_folder, 'system_summary.hdf5'), 'w')
  trades_hdf5.create_dataset('parameter_names', data=parameter_names, dtype='S10')
  trades_hdf5.create_dataset('parameters_minmax', data=parameters_minmax, dtype=np.float64)
  trades_hdf5.create_dataset('ln_err_const', data=np.asarray([ln_err_const], dtype=np.float64), dtype=np.float64)
  trades_hdf5.close()

  # MULTINEST HERE
  #output_mnest = os.path.join(working_folder, 'trades_mnest_')
  #output_mnest = os.path.join(cli.sub_folder, 'trades_mnest_')
  output_mnest = 'trades_mnest_'
  os.chdir(working_folder)
  log_zero_value = -0.5*1.e8
  seed_value = convert_to_int(cli.seed_value)
  #seed_value = 392919
  #n_pop = nfit+int(nfit*0.5)
  n_pop = convert_to_int(cli.n_pop)
  if( n_pop < nfit): n_pop = nfit*10
  n_update = 5 # by argument
  resume_flag = cli.resume_flag
  multi_node_flag = True
  mpi_onoff = False
  logger.info('pyMultiNest parameters:')
  logger.info('folder = %s' %(output_mnest))
  logger.info('seed_value = %d , n_pop = %d , n_update = %d' %(seed_value, n_pop, n_update))
  logger.info('resume_flag = %r , multi_node_flag = %r, mpi_onoff = %r' %(resume_flag, multi_node_flag, mpi_onoff))
  #of_run.write(' pyMultiNest parameters:\n')
  #of_run.write(' folder = %s\n seed_value = %d , n_pop = %d , n_update = %d\n resume_flag = %r , multi_node_flag = %r, mpi_onoff = %r\n' %(output_mnest, seed_value, n_pop, n_update, resume_flag, multi_node_flag, mpi_onoff))

  #
  # RESCALE PARAMETERS FUNCTION NEEDED BY LNLIKE
  #
  def trades_rescale(fitting_parameters, ndim, nparams):
    for i in range(0,ndim):
      fitting_parameters[i] = parameters_minmax[i,0] + fitting_parameters[i]*delta_parameters[i]
    return fitting_parameters

  # LNPRIOR TO BE ADDED TO LOGLHD
  # it can use all the variables defined before this point!
  def lnprior(fitting_parameters, ndim):
    lnprior_value = 0.
    i_der = 0
    for i in range(0, ndim):
      #print i,parameter_names[i], fitting_priors_type[i]
      ln_temp = 0.
      
      # calculate the LogLikelihood<->prior of fitting parameter
      if(fitting_priors_type[i][1].lower() == 'g'):
        ln_temp = -0.5*(((fitting_parameters[i]-fitting_priors[i][0])/fitting_priors[i][1])**2)
        lnprior_value = lnprior_value + ln_temp
        #print '%18.12f %18.12f (%18.12f) => ln = %18.12f' %(fitting_parameters[i], fitting_priors[i][0], fitting_priors[i][1], ln_temp)
      
      # calculate the LogLikelihood<->prior of derived parameter
      if('mA' in parameter_names[i]):
        ln_temp = 0.
        ecc = np.sqrt(fitting_parameters[i-2]**2 + fitting_parameters[i-1]**2)
        if (ecc <= 0.):
          ecc = np.finfo(float).eps
        elif (ecc > 1.):
          ecc = 1.- np.finfo(float).eps
        # ecc prior
        if(derived_priors_type[i_der][1].lower() == 'g'):
          ln_temp = -0.5*(((ecc-derived_priors[i_der][0])/derived_priors[i_der][1])**2)
          lnprior_value = lnprior_value + ln_temp
          #print derived_priors_type[i_der]
          #print '%18.12f %18.12f (%18.12f) => ln = %18.12f' %(ecc, derived_priors[i_der][0], derived_priors[i_der][1], ln_temp)
        # phi prior
        if(derived_priors_type[i_der+1][1].lower() == 'g'):
          if(ecc <= np.finfo(float).eps):
            argp = 90.
          else:
            argp = ((np.arctan2(fitting_parameters[i-1], fitting_parameters[i-2])*180./np.pi)+360.)%360.
          phi = (argp + fitting_parameters[i] + 360.)%360.
          ln_temp = 0.
          ln_temp = -0.5*(((phi-derived_priors[i_der+1][0])/derived_priors[i_der+1][1])**2)
          lnprior_value = lnprior_value + ln_temp
          #print derived_priors_type[i_der+1]
          #print '%18.12f (argp[%18.12f]+mA[%18.12f]) %18.12f (%18.12f) => ln = %18.12f' %(phi, argp, fitting_parameters[i], derived_priors[i_der+1][0], derived_priors[i_der+1][1], ln_temp)
          i_der = i_der + 2
      
    return lnprior_value

  # LNLIKEHOOD FUNCTION NEEDED BY MULTINEST
  def lnlike(fitting_parameters, ndim, nparams):
    loglhd = 0.
    check = 1
    trades_parameters = np.asarray([fitting_parameters[i] for i in range(0,ndim)], dtype=np.float64)
    loglhd, check = pytrades_lib.pytrades.fortran_loglikelihood(trades_parameters)
    if ( check == 0 ):
      loglhd = -0.5e10
    else:
      lnprior_value = lnprior(fitting_parameters, ndim)
      #lnprior_value = 0.0
      loglhd = loglhd + ln_err_const + lnprior_value # ln_err_const (global variable) & lnprior_value
    
    return loglhd


  # run MultiNest
  pymultinest.run(LogLikelihood=lnlike, Prior=trades_rescale,
                  n_dims=nfit, n_params=nfit,
                  outputfiles_basename=output_mnest,
                  multimodal=multi_node_flag, log_zero=log_zero_value,
                  seed=seed_value, n_live_points=n_pop,
                  n_iter_before_update=n_update, resume=resume_flag,
                  verbose=True, init_MPI=mpi_onoff)

  elapsed = time.time() - start
  elapsed_d, elapsed_h, elapsed_m, elapsed_s = computation_time(elapsed)

  logger.info('')
  logger.info('pyTRADES: pyMultiNest FINISHED in %2d day %02d hour %02d min %.2f sec - bye bye' %(int(elapsed_d), int(elapsed_h), int(elapsed_m), elapsed_s))
  logger.info('')

  #of_run.write(' pyTRADES: pyMultiNest FINISHED in %2d day %02d hour %02d min %.2f sec - bye bye\n' %(int(elapsed_d), int(elapsed_h), int(elapsed_m), elapsed_s))
  #of_run.close()
  pytrades_lib.pytrades.deallocate_variables()

  return
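trades_rescale above is the usual MultiNest prior transform: each unit-cube coordinate is mapped linearly onto its parameter bounds. A self-contained sketch of the same mapping, with illustrative bounds:

import numpy as np

# Illustrative bounds: two parameters, one (min, max) row each.
parameters_minmax = np.array([[0.5, 2.0],
                              [-1.0, 1.0]])
delta_parameters = parameters_minmax[:, 1] - parameters_minmax[:, 0]

def unit_cube_to_physical(cube):
    # Linear map of unit-cube coordinates onto the parameter bounds,
    # exactly what trades_rescale does element by element.
    return parameters_minmax[:, 0] + np.asarray(cube) * delta_parameters

print(unit_cube_to_physical([0.5, 0.5]))  # midpoint -> [1.25, 0.]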
Code example #47
def nested_run(id=None, otherids=(), prior = None, parameters = None, 
	sampling_efficiency = 0.3, evidence_tolerance = 0.5,
	n_live_points = 400, outputfiles_basename = 'chains/', **kwargs):
	"""
	Run the Bayesian analysis with specified parameters+transformations.

	:param id: See the sherpa documentation of calc_stat.
	:param otherids: See the sherpa documentation of calc_stat.
	:param prior: prior function created with create_prior_function.
	:param parameters: List of parameters to analyse.
	:param outputfiles_basename: prefix for output filenames.
	
	If prior is None, uniform priors are used on the passed parameters.
	If parameters is also None, all thawed parameters are used.

	The remainder are multinest arguments (see PyMultiNest and MultiNest documentation!)
	n_live_points: 400 are often enough
	
	For quick results, use sampling_efficiency = 0.8, n_live_points = 50, 
	evidence_tolerance = 5. 
	The real results must be estimated with sampling_efficiency = 0.3,
	otherwise it is not reliable.
	"""
	fit = ui._session._get_fit(id=id, otherids=otherids)[1]

	if not isinstance(fit.stat, (Cash, CStat)):
		raise RuntimeError("Fit statistic must be cash or cstat, not %s" %
			fit.stat.name)
    
	if parameters is None:
		parameters = fit.model.thawedpars
	def log_likelihood(cube, ndim, nparams):
		try:
			for i, p in enumerate(parameters):
				assert not isnan(cube[i]), 'ERROR: parameter %d (%s) to be set to %f' % (i, p.fullname, cube[i])
				p.val = cube[i]
				#print "%s: %f" % (p.fullname,p.val),
			l = -0.5*fit.calc_stat()
			#print "%.1f" % l
			return l
		except Exception as e:
			print('Exception in log_likelihood function: ', e)
			for i, p in enumerate(parameters):
				print('    Parameter %10s: %f --> %f [%f..%f]' % (p.fullname, p.val, cube[i], p.min, p.max))
			import sys
			sys.exit(-127)
		return -1e300


	if prior is None:
		prior = create_prior_function(id=id, otherids=otherids, parameters = parameters)
	n_params = len(parameters)
	pymultinest.run(log_likelihood, prior, n_params, 
		sampling_efficiency = sampling_efficiency, n_live_points = n_live_points, 
		outputfiles_basename = outputfiles_basename, evidence_tolerance=evidence_tolerance,
		**kwargs)

	import json
	m = ui._session._get_model(id)
	paramnames = [p.fullname for p in parameters]
	with open('%sparams.json' % outputfiles_basename, 'w') as f:
		json.dump(paramnames, f, indent=2)
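Per the docstring's "quick results" advice, a quick-look call might look like the sketch below (the id and basename are illustrative; a sherpa fit with a cash or cstat statistic must already be set up):

nested_run(id=1,
           outputfiles_basename='chains/quick_',
           sampling_efficiency=0.8,
           n_live_points=50,
           evidence_tolerance=5.0)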
Code example #48
	def myloglike_kg(cube,ndim,n_params):
		try:
			loglike = kg_loglikelihood(cube,my_observable,my_error,a)
			return loglike
		except Exception:
			return -np.inf 

	parameters = ['Semi-major axis','Eccentricity','Position Angle', 'Thickness','Contrast']
	n_params = len(parameters)
	resume = False
	eff = 0.3
	multi = True  # no trailing comma: it would silently make this a tuple
	max_iter = 10000
	ndim = n_params

	pymultinest.run(myloglike_kg, myprior, n_params,wrapped_params=[2],
		verbose=False,resume=False,max_iter=max_iter)

	thing = pymultinest.Analyzer(n_params = n_params)
	try:
		s = thing.get_stats()

		ksemis[trial], dksemis[trial] = s['marginals'][0]['median']/4., s['marginals'][0]['sigma']/4.
		keccs[trial], dkeccs[trial] = s['marginals'][1]['median'], s['marginals'][1]['sigma']
		kthetas[trial], dkthetas[trial] = s['marginals'][2]['median'], s['marginals'][2]['sigma']
		kthicks[trial], dkthicks[trial] = s['marginals'][3]['median'], s['marginals'][3]['sigma']
		kcons[trial], dkcons[trial] = s['marginals'][4]['median'], s['marginals'][4]['sigma']

		stuff = thing.get_best_fit()
		best_params = stuff['parameters']
		print('Best params (kg):', best_params)
Code example #49
def multinest_run(n_live_points=1000, 
                  target = 'ob110022', saveto=multiNest_saveto):
	
	
    def priors(cube, ndim, nparams):
        return   
	
    def likelihood(cube, ndim, nparams):

        lnlikePhot = 0.0
        lnlike_nonPhot = 0.0
        parnames = Photpars['parnames']
        
        # Photometric params
        if MultiDimPriors:
            params, lnlikePhot = random_photpar_multidim(cube[0],
                                                         Photpars['MultiDimPrior_Bins'],
                                                         Photpars['MultiDimPrior'][:,0],
                                                         Photpars['MultiDimPrior'][:,1],
                                                         Photpars['MultiDimPrior'][:, -nPhotpars:])
            for i in range(nPhotpars): cube[i] = params[i]
        else:
            for i in range(nPhotpars):
                param, ln_prob_param = random_photpar(cube[i],
                                                      Photpars[parnames[i]][:,0],
                                                      Photpars[parnames[i]][:,1],
                                                      Photpars[parnames[i]][:,2])
                cube[i]=param
                lnlikePhot += ln_prob_param

        idx = nPhotpars

        # x Position at t0:
        thetaS0x, ln_prob_thetaS0x = random_thetaS0x(cube[idx])
        cube[idx] = thetaS0x
        idx += 1
        lnlike_nonPhot += ln_prob_thetaS0x

        # y Position at t0:
        thetaS0y, ln_prob_thetaS0y = random_thetaS0y(cube[idx])
        cube[idx] = thetaS0y
        idx += 1
        lnlike_nonPhot += ln_prob_thetaS0y

        # Source proper motion (x dimension)
        muSx, ln_prob_muSx = random_muSx(cube[idx])
        cube[idx] = muSx
        idx += 1
        lnlike_nonPhot += ln_prob_muSx

        # Source proper motion (y dimension)
        muSy, ln_prob_muSy = random_muSy(cube[idx])
        cube[idx] = muSy
        idx += 1
        lnlike_nonPhot += ln_prob_muSy

        # Source-lens relative proper motion (x dimension)
        muRelx, ln_prob_muRelx = random_muRelx(cube[idx])
        cube[idx] = muRelx
        idx += 1
        lnlike_nonPhot += ln_prob_muRelx

        # Source-lens relative proper motion (y dimension)
        muRely, ln_prob_muRely = random_muRely(cube[idx])
        cube[idx] = muRely
        idx += 1
        lnlike_nonPhot += ln_prob_muRely


        t0 = cube[0]
        beta = cube[1]
        tE = cube[2]
        piEN = cube[3]
        piEE= cube[4]


        #Create astrometric model of source
        thetaS_model, thetaE_amp, M, shift, thetaS_nolens = MCMC_LensModel.LensModel_Trial1(tobs, t0, tE,
                                                                                            [thetaS0x, thetaS0y],
                                                                                            [muSx,muSy],
                                                                                            [muRelx, muRely],
                                                                                            beta,
                                                                                            [piEN, piEE])
        cube[11] = thetaE_amp
        cube[12] = M
        thetaSx_model = thetaS_model[:,0]
        thetaSy_model = thetaS_model[:,1]

        lnlike =  lnlikePhot + lnlike_nonPhot + \
          MCMC_LensModel.lnLikelihood(thetaSx_model, thetaSx_data, xerr_data) + \
          MCMC_LensModel.lnLikelihood(thetaSy_model, thetaSy_data, yerr_data)

        # print "Log Likelihood:  ", lnlike

        return lnlike
		
    ## num_dims = 11
    ## num_params = 13
    num_dims= 11
    num_params= 13  #cube will have this many dimensions
    ev_tol=0.3
    samp_eff=0.8

    #Create param file
    _run = open(saveto + runcode + '_params.run', 'w')
    _run.write('Num Dimensions: %d\n' % num_dims)
    _run.write('Num Params: %d\n' % num_params)
    _run.write('Evidence Tolerance: %.1f\n' % ev_tol)
    _run.write('Sampling Efficiency: %.1f\n' % samp_eff)
    _run.write('Num Live Points: %d\n' % n_live_points)
    _run.close()

    startdir = os.getcwd()
    os.chdir(saveto)

        
    pymultinest.run(likelihood, priors, num_dims, n_params=num_params,
                    outputfiles_basename=runcode + '_',
                    verbose=True, resume=False, evidence_tolerance=ev_tol,
                    sampling_efficiency=samp_eff, n_live_points=n_live_points,
                    multimodal=True, n_clustering_params=num_dims,
                    importance_nested_sampling=False)

    os.chdir(startdir)

    # Testing
    # lnlike = multinest_run.likelihood([0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.95], 12, 12)
    # print lnlike
    return
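Note the structure of this example: priors() is a no-op, and the inverse-CDF draws happen inside likelihood(), which writes the physical values back into cube and folds the log prior probabilities into the returned value. A stripped-down sketch of that pattern (random_par is a hypothetical helper standing in for random_photpar and friends):

import numpy as np

def random_par(u, lo=0.0, hi=1.0):
    # Hypothetical inverse-CDF helper: a uniform draw on [lo, hi]
    # together with the log prior density of that draw.
    val = lo + u * (hi - lo)
    ln_prob = -np.log(hi - lo)
    return val, ln_prob

def priors(cube, ndim, nparams):
    return  # identity: the transforms happen inside likelihood()

def likelihood(cube, ndim, nparams):
    ln_prior = 0.0
    for i in range(ndim):
        cube[i], lp = random_par(cube[i])  # write back, as above
        ln_prior += lp
    # ...evaluate the data likelihood with the transformed cube...
    return ln_prior  # plus the data term in a real model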
Code example #50
File: fitting.py  Project: benjaminpope/pysco
def nest(kpo,paramlimits=[20.,250.,0.,360.,1.0001,10],ndim=3,resume=False,eff=0.3,multi=True,
    max_iter=0,bispec=False):

    '''Default implementation of a MultiNest fitting routine for kernel 
    phase data. Requires a kernel phase kpo object, parameter limits and 
    sensible keyword arguments for the multinest parameters. 

    This function does very naughty things, creating functions inside this
    function, because PyMultiNest is very picky about how you pass it
    data.

    Optional parameter eff tunes sampling efficiency, and multi toggles multimodal 
    nested sampling on and off. Turning off multimodal sampling results in a speed 
    boost of ~ 20-30%. 

    '''
    import pymultinest # importing here so you don't have to unless you use nest!

    # make sure you're using the right number of parameters
    nbands = kpo.kpd.shape[0]
    # if 'WFC3' in kpo.hdr['tel']:
    #     bands = str(round(1e9*kpo.hdr['filter'],3))
    #     parameters = ['Separation','Position Angle','Contrast at ' + bands + ' nm']
    #     print bands
    #     print parameters
    # else:
    if np.size(kpo.hdr) == 1:
        bands = str(round(1e6*kpo.hdr['filter'],3))
        parameters = ['Separation','Position Angle','Contrast at ' + bands + ' um']
    else:
        bands = [str(round(1e6*hd['filter'],3)) for hd in kpo.hdr]
        parameters = ['Separation','Position Angle'] + ['Contrast at ' + band + ' um' for band in bands]
    
    n_params = len(parameters)
    ndim = n_params

    def myprior(cube, ndim, n_params,paramlimits=paramlimits):
        cube[0] = (paramlimits[1] - paramlimits[0])*cube[0]+paramlimits[0]
        cube[1] = (paramlimits[3] - paramlimits[2])*cube[1]+paramlimits[2]
        for j in range(2,ndim):
            cube[j] = (paramlimits[5] - paramlimits[4])*cube[j]+paramlimits[4]

    if bispec:
        print('Using a bispectral analysis')
        def myloglike(cube,ndim,n_params):
            loglike = bispec_loglikelihood(cube,kpo)
            return loglike
    else:
        print('Modelling kernel phases with nested sampling')
        def myloglike(cube,ndim,n_params):
            loglike = kp_loglikelihood(cube,kpo)
            return loglike

    tic = time.time() # start timing

    #---------------------------------
    # now run MultiNest!
    #---------------------------------

    pymultinest.run(myloglike, myprior, n_params, wrapped_params=[1], resume=resume, 
        verbose=True, sampling_efficiency=eff, multimodal=multi, 
        n_iter_before_update=1000,max_iter=max_iter)

    # let's analyse the results
    a = pymultinest.Analyzer(n_params = n_params)
    s = a.get_stats()

    toc = time.time()

    if toc-tic < 60.:
        print('Time elapsed =', toc-tic, 's')
    else:
        print('Time elapsed =', (toc-tic)/60., 'mins')

    null = -0.5*np.sum(((kpo.kpd)/kpo.kpe)**2)
    # json.dump(s, file('%s.json' % a.outputfiles_basename, 'w'), indent=2)
    print()
    print("-" * 30, 'ANALYSIS', "-" * 30)
    print("Global Evidence:\n\t%.15e +- %.15e" % (s['global evidence']-null, s['global evidence error']))
    params = s['marginals']

    bestsep = params[0]['median']
    seperr = params[0]['sigma']

    if 'Hale' in kpo.hdr['tel']: params[1]['median'] += 220.0 + kpo.hdr['orient']
    elif 'HST'  in kpo.hdr['tel']: params[1]['median'] -= kpo.hdr['orient']
    else:         params[1]['median'] += 0.0

    params[1]['median'] = np.mod(params[1]['median'],360.)

    bestth = params[1]['median']
    therr = params[1]['sigma']

    print('Separation: %.3f pm %.2f' % (bestsep, seperr))
    print('Position angle: %.3f pm %.2f' % (bestth, therr))

    if kpo.nsets == 1:
        bestcon = params[2]['median']
        conerr = params[2]['sigma']
        print('Contrast at', bands, 'um: %.3f pm %.3f' % (bestcon, conerr))
    else:
        for j, band in enumerate(bands):

            bestcon = params[j+2]['median']
            conerr = params[j+2]['sigma']
        
            print('Contrast at', band, 'um: %.3f pm %.3f' % (bestcon, conerr))

    return params
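Assuming a kernel-phase kpo object has already been built, a call might look like the sketch below (multi=False trades multimodal sampling for the speed boost mentioned in the docstring):

params = nest(kpo,
              paramlimits=[20., 250., 0., 360., 1.0001, 10.],
              eff=0.3,
              multi=False,   # ~20-30% faster, per the docstring
              max_iter=0)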
Code example #51
File: bayesian_analysis.py  Project: giacomov/3ML
    def sample_multinest(self, n_live_points, chain_name="chains/fit-", quiet=False, **kwargs):
        """
        Sample the posterior with MULTINEST nested sampling (Feroz & Hobson)

        :param n_live_points: number of MULTINEST live points
        :param chain_name: where to store the multinest incremental output
        :param quiet: whether or not to display results
        :param kwargs: additional pyMULTINEST keywords

        :return: MCMC samples

        """

        assert has_pymultinest, "You don't have pymultinest installed, so you cannot run the Multinest sampler"

        self._update_free_parameters()

        n_dim = len(self._free_parameters.keys())

        # MULTINEST has a convergence criterion and therefore there is no way
        # to determine progress

        sampling_procedure = sample_without_progress

        # MULTINEST uses a different call signature for
        # sampling, so we construct callbacks
        loglike, multinest_prior = self._construct_multinest_posterior()

        # We need to check if the MCMC
        # chains will have a place on
        # the disk to write and if not,
        # create one

        mcmc_chains_out_dir = ""
        tmp = chain_name.split('/')
        for s in tmp[:-1]:
            mcmc_chains_out_dir += s + '/'

        if using_mpi:

            # if we are running in parallel and this is not the
            # first engine, then we want to wait and let everything finish

            if rank != 0:

                # let these guys take a break
                time.sleep(1)

            else:

                # create mcmc chains directory only on first engine

                if not os.path.exists(mcmc_chains_out_dir):
                    os.makedirs(mcmc_chains_out_dir)

        else:
            
            if not os.path.exists(mcmc_chains_out_dir):
                os.makedirs(mcmc_chains_out_dir)


        print("\nSampling\n")
        print("MULTINEST has its own convergence criteria... you will have to wait blindly for it to finish")
        print("If INS is enabled, one can monitor the likelihood in the terminal for completion information")

        # Multinest must be run parallel via an external method
        # see the demo in the examples folder!!

        if threeML_config['parallel']['use-parallel']:

            raise RuntimeError("If you want to run multinest in parallel you need to use an ad-hoc method")

        else:

            sampler = pymultinest.run(loglike,
                                      multinest_prior,
                                      n_dim,
                                      n_dim,
                                      outputfiles_basename=chain_name,
                                      n_live_points=n_live_points,
                                      **kwargs)

        # Use PyMULTINEST analyzer to gather parameter info

        process_fit = False

        if using_mpi:

            # if we are running in parallel and this is not the
            # first engine, then we want to wait and let everything finish

            if rank !=0:

                # let these guys take a break
                time.sleep(5)

                # these engines do not need to read
                process_fit = False

            else:

                # wait for a moment to allow it all to turn off
                time.sleep(5)

                process_fit = True

        else:

            process_fit = True


        if process_fit:

            multinest_analyzer = pymultinest.analyse.Analyzer(n_params=n_dim,
                                                              outputfiles_basename=chain_name)

            # Get the log. likelihood values from the chain
            self._log_like_values = multinest_analyzer.get_equal_weighted_posterior()[:, -1]

            self._sampler = sampler

            self._raw_samples = multinest_analyzer.get_equal_weighted_posterior()[:, :-1]

            # now get the log probability

            self._log_probability_values = self._log_like_values +  np.array([self._log_prior(samples) for samples in self._raw_samples])

            self._build_samples_dictionary()

            self._marginal_likelihood = multinest_analyzer.get_stats()['global evidence'] / np.log(10.)

            self._build_results()

            # Display results
            if not quiet:
                self._results.display()

            # now get the marginal likelihood



            return self.samples
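The rank gating above (rank 0 creates directories and processes output while the other engines sleep) is the usual way to keep MPI processes from racing on the filesystem. A minimal sketch of the same guard with mpi4py (assuming mpi4py is installed; the directory name is illustrative):

import os
import time
from mpi4py import MPI

rank = MPI.COMM_WORLD.Get_rank()
out_dir = 'chains'

if rank == 0:
    os.makedirs(out_dir, exist_ok=True)  # one process touches the disk
else:
    time.sleep(1)  # give rank 0 a head start, as above

MPI.COMM_WORLD.Barrier()  # a barrier avoids guessing sleep times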
Code example #52
print "\n You are searching for the following parameters: {0}\n".format(parameters)
n_params = len(parameters)

print "\n The total number of parameters is {0}\n".format(n_params)

#####################
# Now, we sample.....
#####################

print "\n Now, we sample... \n"

if args.eccSearch:
    dirextension = 'eccSearch'
else:
    dirextension = 'circSearch'

master_path = os.getcwd()
os.chdir(args.datapath)
pymultinest.run(lnprob, my_prior_mnest, n_params, 
                importance_nested_sampling = False, 
                resume = False, verbose = True, n_live_points=1000,
                outputfiles_basename=u'chains_{0}/{0}_'.format(dirextension),
                sampling_efficiency='parameter')
Code example #53
File: run_multinest.py  Project: sdhawan21/euclidIR
import pymultinest as pmn
import pandas as pd
import triangle

from mn_test import *


pmn.run(llhood_w_const, prior_transform_const, 3, verbose=True, n_live_points=150)


data = pd.read_csv('chains/1-post_equal_weights.dat', names=['H0', 'om', 'w', 'lhood'], delim_whitespace=True, header=None)

triangle.corner(data.values[:,:-1], labels = data.columns[:-1], smooth=1)
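The same equal-weighted posterior can also be read back through the Analyzer instead of hard-coding the file name (the basename must match what run() wrote, here pymultinest's default 'chains/1-'):

import pymultinest

a = pymultinest.Analyzer(n_params=3, outputfiles_basename='chains/1-')
posterior = a.get_equal_weighted_posterior()   # columns: params..., loglike
samples, loglike = posterior[:, :-1], posterior[:, -1]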


Code example #54
File: mnfit.py  Project: JohannesBuchner/mnfit
    def Explore(self):
        '''
        This member function invokes multinest.
        The data must be loaded and the likelihood set.

        '''
        print(bcolors.WARNING+'___________________________________________________'+bcolors.ENDC)
        print(bcolors.WARNING+'|'+bcolors.OKBLUE+' _____       _____                 _____  _  _   '+bcolors.WARNING+'|'+bcolors.ENDC)
        print(bcolors.WARNING+'|'+bcolors.OKBLUE+'|     | ___ |   __| ___  ___  ___ |   __||_|| |_ '+bcolors.WARNING+'|'+bcolors.ENDC)
        print(bcolors.WARNING+'|'+bcolors.OKBLUE+'| | | ||   ||__   || . || -_||  _||   __|| ||  _|'+bcolors.WARNING+'|'+bcolors.ENDC)
        print(bcolors.WARNING+'|'+bcolors.OKBLUE+'|_|_|_||_|_||_____||  _||___||___||__|   |_||_|  '+bcolors.WARNING+'|'+bcolors.ENDC)
        print(bcolors.WARNING+'|'+bcolors.OKBLUE+'                   |_|                           '+bcolors.WARNING+'|'+bcolors.ENDC)
        print(bcolors.WARNING+'|                                                 |'+bcolors.ENDC)
        print(bcolors.WARNING+'|                                                 |'+bcolors.ENDC)
        print(bcolors.WARNING+'|                                                 |'+bcolors.ENDC)
        print(bcolors.WARNING+'|                          '+bcolors.OKGREEN+'-J. Michael Burgess'+bcolors.WARNING+'    |'+bcolors.ENDC)
        print(bcolors.WARNING+'|                                                 |'+bcolors.ENDC)
        print(bcolors.WARNING+'|_________________________________________________|'+bcolors.ENDC)

        #print
        #print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
        #print
        #print "License Info:"
        #print "\t Don't read this.\n\t Do whatever the hell you want with this software"
        #print
        #print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"

        
        if not self._dataLoaded: #Make sure to have loaded data
            print()
            print(bcolors.FAIL + "YOU HAVE NOT LOADED ANY DATA!!" + bcolors.ENDC)
            print()
            return

        if not self._saveFileSet: #Warn that no savefile is set
            print()
            print(bcolors.WARNING + "Save file not set!!! Fit params not saved!" + bcolors.ENDC)
            print()

        outfilesDir = ""
        tmp = self.basename.split('/')
        for s in tmp[:-1]:
            outfilesDir+=s+'/'
            
        self.outfilesDir = outfilesDir
        

        if not os.path.exists(outfilesDir): os.makedirs(outfilesDir)
        
        # we want to see some output while it is running
        if not self.silent:
            print "SILENT"
            progress = pymultinest.ProgressPlotter(n_params = self.n_params); progress.start()
            threading.Timer(2, show, [self.basename+"phys_live.points.pdf"]).start() # delayed opening
        startTime = time.time()

        self._PreFitInfo()

        # run MultiNest
        pymultinest.run(self.likelihood, self.prior, self.n_params,
                        importance_nested_sampling=self.importance_nested_sampling,
                        resume=self.resume, verbose=self.verbose,
                        sampling_efficiency=self.sampling_efficiency,
                        n_live_points=self.n_live_points,
                        outputfiles_basename=self.basename,
                        init_MPI=False, dump_callback=self.callback,
                        write_output=self.write)


        # ok, done. Stop our progress watcher
        if not self.silent:
            progress.stop()

        print()
        print(bcolors.OKGREEN + "Finished sampling in %.2f seconds" % (time.time()-startTime) + bcolors.ENDC)
        print()
        if self._saveFileSet:
            self._WriteFit()
Code example #55
def run(catalogfile, vel_err, mag_err, N_gauss, outdir, rotate=True):
    """
    PyMultiNest run to determine cluster membership, using PM catalog and
    applying vel_err and mag_err cuts. Output is put in newly-created outdir
    directory (must be a string).

    Parameters:
    catalogflie --> String containing the name of a FITS catalog.
    vel_err --> The maximum allowed velocity error for stars to be included.
    mag_err --> The maximum allowed magnitude error for stars to be included.
    N_gauss --> number bivariate gaussian, where N_gauss <= 4
    outdir --> The output directory name.
    
    Keywords:
    rotate = 1 --> rotate star velocities into RA/DEC format, as opposed
    to X,Y
    """
    # Load data for full field, extract velocities (already converted to mas)
    d = loadData(catalogfile, vel_err, mag_err, rotate=rotate)

    star_Vx = d["fit_vx"]
    star_Vy = d["fit_vy"]
    star_Sigx = d["fit_vxe"]
    star_Sigy = d["fit_vye"]

    N_stars = len(d)

    def print_param(pname, val, logp, headerFirst=False):
        rowHead = "{0:6s}  "
        colHead = " val_{0} (  logp_{0} )"
        colVal = "{0:6.3f} ({1:9.2e})"

        if headerFirst:
            outhdr = "  ".join([colHead.format(k) for k in range(N_gauss)])
            print(rowHead.format("") + outhdr)

        outstr = "  ".join([colVal.format(val[k], logp[k]) for k in range(N_gauss)])
        print(rowHead.format(pname) + outstr)

        return

    def priors(cube, ndim, nparams):
        return

    def likelihood(cube, ndim, nparams):
        """
        Define the likelihood function (from Clarkson+12, Hosek+15)
        """
        # start the timer
        t0 = time.time()

        ####################
        # Set up model params
        ####################
        # Number of parameters per Gaussian:
        N_per_gauss = 6

        # Make arrays for the paramters of each Gaussian
        pi = np.arange(N_gauss, dtype=float)
        vx = np.arange(N_gauss, dtype=float)
        vy = np.arange(N_gauss, dtype=float)
        sigA = np.arange(N_gauss, dtype=float)
        sigB = np.arange(N_gauss, dtype=float)
        theta = np.arange(N_gauss, dtype=float)

        # Make arrays for the prior probability of each paramter
        logp_pi = np.arange(N_gauss, dtype=float)
        logp_vx = np.arange(N_gauss, dtype=float)
        logp_vy = np.arange(N_gauss, dtype=float)
        logp_sigA = np.arange(N_gauss, dtype=float)
        logp_sigB = np.arange(N_gauss, dtype=float)
        logp_theta = np.arange(N_gauss, dtype=float)

        # Set the fraction of stars in each Gaussian
        for kk in range(N_gauss):
            pi[kk], logp_pi[kk] = random_pi(cube[kk * N_per_gauss + 0])

        # Make sure all the sum(pi) = 1.
        pi /= pi.sum()

        # Sort the field pi values such that they are always ranked from
        # smallest to largest.
        sidx = pi[1:].argsort()
        pi[1:] = pi[1:][sidx]
        logp_pi[1:] = logp_pi[1:][sidx]

        # Re-set the cube values. Note this is AFTER sorting.
        for kk in range(N_gauss):
            cube[kk * N_per_gauss + 0] = pi[kk]

        # Set the other Gaussian parameters.
        for kk in range(N_gauss):
            # Treat the cluster gaussian (the first, most compact one)
            # with a special prior function.
            if kk == 0:
                rand_vx = random_clust_vx
                rand_vy = random_clust_vy
                rand_sigA = random_clust_sigA
                rand_sigB = random_clust_sigB
                rand_theta = random_clust_theta
            else:
                rand_vx = random_v
                rand_vy = random_v
                rand_sigA = random_sig
                rand_sigB = random_sig
                rand_theta = random_theta

            # Velocity center
            vx[kk], logp_vx[kk] = rand_vx(cube[kk * N_per_gauss + 1])
            cube[kk * N_per_gauss + 1] = vx[kk]

            vy[kk], logp_vy[kk] = rand_vy(cube[kk * N_per_gauss + 2])
            cube[kk * N_per_gauss + 2] = vy[kk]

            # Major axis
            sigA[kk], logp_sigA[kk] = rand_sigA(cube[kk * N_per_gauss + 3])
            cube[kk * N_per_gauss + 3] = sigA[kk]

            # Minor axis
            sigB[kk], logp_sigB[kk] = rand_sigB(cube[kk * N_per_gauss + 4])
            cube[kk * N_per_gauss + 4] = sigB[kk]

            # Angle of major axis (in radians)
            theta[kk], logp_theta[kk] = rand_theta(cube[kk * N_per_gauss + 5])
            cube[kk * N_per_gauss + 5] = theta[kk]

            # Only want to consider gaussians where Sig A > Sig B
            if sigB[kk] > sigA[kk]:
                return -np.inf

            # Check that all our prior probabilities are valid, otherwise abort
            # before expensive calculation.
            if (
                (logp_pi[kk] == -np.inf)
                or (logp_vx[kk] == -np.inf)
                or (logp_vy[kk] == -np.inf)
                or (logp_sigA[kk] == -np.inf)
                or (logp_sigB[kk] == -np.inf)
                or (logp_theta[kk] == -np.inf)
            ):
                return -np.inf

        ################################
        # Calculating likelihood function
        #  Likelihood =
        #    \Sum(i=0 -> N_stars) \Sum(k=0 -> N_gauss)
        #        \pi_k * (2 \pi |\Sigma_k,i|)^{-1/2} *
        #        exp[ -1/2 * (\mu_i - \mu_k)^T \sigma_k,i (\mu_i - \mu_k) ]
        ################################
        # Keep track of the probability for each star, each gaussian
        # component. We will add over components and multiply over stars.
        prob_gauss = np.zeros((N_gauss, N_stars), dtype=float)

        # L_{i,k}  Loop through the different gaussian components.
        for kk in range(N_gauss):
            # N_stars long array
            prob_gauss[kk, :] = prob_ellipse(
                star_Vx, star_Vy, star_Sigx, star_Sigy, pi[kk], vx[kk], vy[kk], sigA[kk], sigB[kk], theta[kk]
            )

        # For each star, the total likelihood is the sum
        # of each component (before log).
        L_star = prob_gauss.sum(axis=0)  # This array should be N-stars long
        logL_star = np.log10(L_star)
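        # (note: log10 is used here, while the prior terms added below are
        # natural logs; MultiNest itself expects ln-likelihoods)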

        # Final likelihood
        logL = logL_star.sum()
        logL_tmp = logL

        # Add in log(prior probabilities) as well
        for kk in range(N_gauss):
            logL += logp_pi[kk]
            logL += logp_vx[kk]
            logL += logp_vy[kk]
            logL += logp_sigA[kk]
            logL += logp_sigB[kk]
            logL += logp_theta[kk]

        # Some printing
        print "*** logL = {0:9.2e}   w/priors = {1:9.2e}".format(logL_tmp, logL)

        print_param("pi", pi, logp_pi, headerFirst=True)
        print_param("vx", vx, logp_vx)
        print_param("vy", vy, logp_vy)
        print_param("sigA", sigA, logp_sigA)
        print_param("sigB", sigB, logp_sigB)
        print_param("theta", theta, logp_theta)

        t1 = time.time()

        total = t1 - t0

        print "TIME SPENT: " + str(total)
        # pdb.set_trace()
        return logL

    #########################################
    # End Likelihoods
    # Begin running multinest
    #########################################
    # Make new directory to hold output
    fileUtil.mkdir(outdir)
    outroot = outdir + "/mnest_"

    num_dims = 2 * 3 * N_gauss
    num_params = num_dims
    ev_tol = 0.3
    samp_eff = 0.8
    n_live_points = 300

    # Create param file
    _run = open(outroot + "params.run", "w")
    _run.write("Catalog: %s\n" % catalogfile)
    _run.write("Vel Err Cut: %.2f\n" % vel_err)
    _run.write("Mag Err Cut: %.2f\n" % mag_err)
    _run.write("Rotate: %s\n" % str(rotate))
    _run.write("Num Gauss: %d\n" % N_gauss)
    _run.write("Num Dimensions: %d\n" % num_dims)
    _run.write("Num Params: %d\n" % num_params)
    _run.write("Evidence Tolerance: %.1f\n" % ev_tol)
    _run.write("Sampling Efficiency: %.1f\n" % samp_eff)
    _run.write("Num Clustering Params: %d\n" % num_dims)
    _run.write("Num Live Points: %d\n" % n_live_points)
    _run.close()

    # Run multinest
    pymultinest.run(
        likelihood,
        priors,
        num_dims,
        n_params=num_params,
        outputfiles_basename=outroot,
        verbose=True,
        resume=False,
        evidence_tolerance=ev_tol,
        sampling_efficiency=samp_eff,
        n_live_points=n_live_points,
        multimodal=True,
        n_clustering_params=num_dims,
        importance_nested_sampling=False,
    )

    return
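prob_ellipse is not shown in this snippet; given the likelihood quoted in the comments, it would evaluate a weighted bivariate Gaussian whose covariance combines each component's axes and angle with the star's measurement errors. A hypothetical sketch, not the project's actual implementation:

import numpy as np

def prob_ellipse(vx, vy, sig_vx, sig_vy, pi_k,
                 mu_x, mu_y, sig_a, sig_b, theta):
    # Hypothetical: pi_k-weighted bivariate normal density per star.
    c, s = np.cos(theta), np.sin(theta)
    # Rotate the diagonal (sig_a^2, sig_b^2) covariance by theta and
    # add the per-star measurement variances on the diagonal.
    sxx = (sig_a * c)**2 + (sig_b * s)**2 + sig_vx**2
    syy = (sig_a * s)**2 + (sig_b * c)**2 + sig_vy**2
    sxy = (sig_a**2 - sig_b**2) * s * c
    det = sxx * syy - sxy**2
    dx, dy = vx - mu_x, vy - mu_y
    maha = (syy * dx**2 - 2.0 * sxy * dx * dy + sxx * dy**2) / det
    return pi_k * np.exp(-0.5 * maha) / (2.0 * np.pi * np.sqrt(det))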
Code example #56
    mpirun -np 4 python run_PyPolyChord.py

    on Mac:
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/lib
    export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libgfortran.so.3
    export LD_PRELOAD=/opt/local/lib/openmpi//lib/libmpi.so:$LD_PRELOAD
    mpirun -np 4 python run_PyPolyChord.py

'''


# number of dimensions our problem has
parameters = mc.variable_list
n_params = mc.ndim

pymultinest.run(mc.multinest_call, mc.multinest_priors, n_params, resume=True,
                verbose=True, n_live_points=1000,  outputfiles_basename=dir_output + 'chains/2-')


def dump():
    progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename=dir_output+'chains/2-'); progress.start()
    threading.Timer(2, show, [dir_output+"chains/2-phys_live.points.pdf"]).start() # delayed opening
    # run MultiNest
    pymultinest.run(mc.multinest_call, mc.multinest_priors, n_params, importance_nested_sampling = False, resume = True,
                    verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=dir_output+'chains/2-')
    # ok, done. Stop our progress watcher
    progress.stop()

    # lets analyse the results
    a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename=dir_output+'chains/2-')
    s = a.get_stats()
Code example #57
def dump():
    progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename=dir_output+'chains/2-'); progress.start()
    threading.Timer(2, show, [dir_output+"chains/2-phys_live.points.pdf"]).start() # delayed opening
    # run MultiNest
    pymultinest.run(mc.multinest_call, mc.multinest_priors, n_params, importance_nested_sampling = False, resume = True,
                    verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=dir_output+'chains/2-')
    # ok, done. Stop our progress watcher
    progress.stop()

    # lets analyse the results
    a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename=dir_output+'chains/2-')
    s = a.get_stats()

    # store name of parameters, always useful
    with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
        json.dump(parameters, f, indent=2)
    # store derived stats
    with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
        json.dump(s, f, indent=2)
    print()
    print("-" * 30, 'ANALYSIS', "-" * 30)
    print("Global Evidence:\n\t%.15e +- %.15e" % ( s['nested sampling global log-evidence'], s['nested sampling global log-evidence error'] ))

    import matplotlib.pyplot as plt
    plt.clf()


    # run MultiNest
    #pymultinest.run(mc.pymultinest_call, mc.pymultinest_priors, mc.ndim, outputfiles_basename=dir_output, resume = False, verbose = True)
    #json.dump(parameters, open(dir_output+'params.json', 'w')) # save parameter names

    # Here we will plot all the marginals and whatnot, just to show off
    # You may configure the format of the output here, or in matplotlibrc
    # All pymultinest does is filling in the data of the plot.

    # Copy and edit this file, and play with it.

    p = pymultinest.PlotMarginalModes(a)
    plt.figure(figsize=(5 * n_params, 5 * n_params))
    # plt.subplots_adjust(wspace=0, hspace=0)
    for i in range(n_params):
        plt.subplot(n_params, n_params, n_params * i + i + 1)
        p.plot_marginal(i, with_ellipses=True, with_points=False, grid_points=50)
        plt.ylabel("Probability")
        plt.xlabel(parameters[i])

        for j in range(i):
            plt.subplot(n_params, n_params, n_params * j + i + 1)
            # plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
            p.plot_conditional(i, j, with_ellipses=False, with_points=True, grid_points=30)
            plt.xlabel(parameters[i])
            plt.ylabel(parameters[j])

    plt.savefig(dir_output+"chains/marginals_multinest.pdf")  # , bbox_inches='tight')
    show(dir_output+"chains/marginals_multinest.pdf")

    for i in range(n_params):
        outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename, i)
        p.plot_modes_marginal(i, with_ellipses=True, with_points=False)
        plt.ylabel("Probability")
        plt.xlabel(parameters[i])
        plt.savefig(outfile, format='pdf', bbox_inches='tight')
        plt.close()

        outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename, i)
        p.plot_modes_marginal(i, cumulative=True, with_ellipses=True, with_points=False)
        plt.ylabel("Cumulative probability")
        plt.xlabel(parameters[i])
        plt.savefig(outfile, format='pdf', bbox_inches='tight')
        plt.close()

    print("Take a look at the pdf files in chains/")
Code example #58
def nested_run(transformations, prior_function = None, sampling_efficiency = 0.3, 
	n_live_points = 400, evidence_tolerance = 0.5,
	outputfiles_basename = 'chains/', verbose=True, **kwargs):
	"""
	Run the Bayesian analysis with specified parameters+transformations.

	If prior_function is None, a prior function is built from the passed
	transformations.

	:param transformations: Parameter transformation definitions
	:param prior_function: set only if you want to specify a custom, non-separable prior
	:param outputfiles_basename: prefix for output filenames.

	The remainder are multinest arguments (see PyMultiNest and MultiNest documentation!)
	n_live_points: 400 are often enough
	
	For quick results, use sampling_efficiency = 0.8, n_live_points = 50, 
	evidence_tolerance = 5.
	The real results must be estimated with sampling_efficiency = 0.3 
	and without using const_efficiency_mode, otherwise it is not reliable.
	"""
	
	# for convenience
	if outputfiles_basename.endswith('/'):
		if not os.path.exists(outputfiles_basename):
			os.mkdir(outputfiles_basename)
	
	if prior_function is None:
		prior_function = create_prior_function(transformations)
	oldchatter = Xset.chatter, Xset.logChatter
	Xset.chatter, Xset.logChatter = 0, 0
	def log_likelihood(cube, ndim, nparams):
		try:
			set_parameters(transformations=transformations, values=cube)
			l = -0.5*Fit.statistic
			#print "like = %.1f" % l
			return l
		except Exception as e:
			print('Exception in log_likelihood function: ', e)
			import sys
			sys.exit(-127)
		return -1e300
	# run multinest
	if Fit.statMethod.lower() not in ['cstat', 'cash']:
		raise RuntimeError('ERROR: not using cash (Poisson likelihood) for Poisson data! set Fit.statMethod to cash before analysing (currently: %s)!' % Fit.statMethod)
	
	n_params = len(transformations)
	pymultinest.run(log_likelihood, prior_function, n_params, 
		sampling_efficiency = sampling_efficiency, n_live_points = n_live_points, 
		outputfiles_basename = outputfiles_basename, 
		verbose=verbose, **kwargs)
	
	paramnames = [str(t['name']) for t in transformations]
	with open('%sparams.json' % outputfiles_basename, 'w') as f:
		json.dump(paramnames, f, indent=4)
	
	# store as chain too, and try to load it for error computations
	analyzer = pymultinest.Analyzer(n_params = len(transformations), 
		outputfiles_basename = outputfiles_basename)
	posterior = analyzer.get_equal_weighted_posterior()
	chainfilename = '%schain.fits' % outputfiles_basename
	store_chain(chainfilename, transformations, posterior)
	xspec.AllChains.clear()
	Xset.chatter, Xset.logChatter = oldchatter
	xspec.AllChains += chainfilename
	
	# set current parameters to best fit
	set_best_fit(analyzer, transformations)
	
	Xset.chatter, Xset.logChatter = oldchatter
	return analyzer
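Again following the docstring's quick-look settings, a hypothetical invocation might look like the sketch below (transformations must be built beforehand with whatever helpers accompany create_prior_function in this project; the basename is illustrative):

analyzer = nested_run(transformations,
                      outputfiles_basename='chains/quick_',
                      sampling_efficiency=0.8,
                      n_live_points=50,
                      evidence_tolerance=5.0)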
Code example #59
def myloglike(cube, ndim, nparams):
	chi = 1.
	#print "cube", [cube[i] for i in range(ndim)], cube
	for i in range(ndim):
		chi *= math.cos(cube[i] / 2.)
	#print "returning", math.pow(2. + chi, 5)
	return math.pow(2. + chi, 5)

# number of dimensions our problem has
parameters = ["x", "y"]
n_params = len(parameters)

# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename='chains/2-'); progress.start()
threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling=False,
		resume=True, verbose=True, sampling_efficiency='model',
		n_live_points=1000, outputfiles_basename='chains/2-')
# ok, done. Stop our progress watcher
progress.stop()

# lets analyse the results
a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename='chains/2-')
s = a.get_stats()

import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
	json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
	json.dump(s, f, indent=2)
print()