Code example #1
0
 def solve_multinest(self, create_analyzer=True):
     """
     Run pymultinest.solve, saving the chain and posterior-mean parameter
     values, then collect all MultiNest output files into a directory
     named after ``self.prefix``.

     Parameters
     ----------
     create_analyzer : bool, optional
         If True (default), attach a ``pymultinest.Analyzer`` for this
         run as ``self.analyzer``.
     """
     parameters = self.model.labels
     n_params = len(parameters)
     result = solve(LogLikelihood=self.loglike, Prior=self.priors,
                    n_dims=n_params, outputfiles_basename=self.prefix,
                    verbose=True)
     print()
     print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
     print()
     print('parameter values:')
     # Posterior mean of each parameter, taken column-wise over the
     # equally-weighted samples returned by solve().
     self.params = []
     for name, col in zip(parameters, result['samples'].transpose()):
         self.params.append(col.mean())
         print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

     # make marginal plots by running:
     # $ python multinest_marginals.py chains/3-
     # For that, we need to store the parameter names:
     import json
     # (Re)create the output directory exactly once.  The original code
     # ran shutil.rmtree + os.mkdir once per entry of the current
     # directory, which was redundant and failed after the first pass.
     if os.path.exists(self.prefix):
         shutil.rmtree(self.prefix)
     os.mkdir(self.prefix)
     with open(os.path.join(self.prefix, '%sparams.json' % self.prefix), 'w') as f:
         json.dump(parameters, f, indent=2)
     # MultiNest writes its output files next to the script, all sharing
     # the basename prefix; move them into the output directory.
     for entry in os.listdir('.'):
         if entry.startswith(self.prefix) and os.path.isfile(entry):
             shutil.move(entry, self.prefix + "/")
     if create_analyzer:
         self.analyzer = Analyzer(n_params, outputfiles_basename=self.prefix)
Code example #2
0
File: multinest.py  Project: blakeaw/Gleipnir
    def deviance_ic(self):
        """Estimate Deviance Information Criterion.
        This function estimates the Deviance Information Criterion (DIC) for the
        model simulated with Nested Sampling (NS). It does so by using the
        posterior distribution estimates computed from the NS outputs.
        The DIC formula is given by:
            DIC = p_D + D_bar,
        where p_D = D_bar - D(theta_bar), D_bar is the posterior average of
        the deviance D(theta)= -2*ln(L(theta)) with L(theta) the likelihood
        of parameter set theta, and theta_bar is posterior average parameter set.

        Returns:
            float: The DIC estimate.
        """
        data = Analyzer(len(self.sampled_parameters),
                        self._file_root,
                        verbose=False).get_data()
        # MultiNest data layout: column 0 = prior mass, column 1 = -2*ln(L),
        # columns 2+ = the sampled parameter vectors.
        log_likes = -0.5 * data[:, 1]
        weights = (data[:, 0] * np.exp(log_likes)) / self.evidence
        # A NaN weight (e.g. from exp overflow) makes the posterior
        # averages meaningless, so bail out with +inf.
        if np.any(np.isnan(weights)):
            return np.inf
        deviances = -2. * log_likes
        mean_deviance = np.average(deviances, weights=weights)
        mean_params = np.average(data[:, 2:], axis=0, weights=weights)
        deviance_at_mean = -2. * self.loglikelihood(mean_params)
        # DIC = p_D + D_bar with p_D = D_bar - D(theta_bar).
        return (mean_deviance - deviance_at_mean) + mean_deviance
Code example #3
0
File: multinest.py  Project: blakeaw/Gleipnir
 def max_loglikelihood(self):
     """Return the maximum log-likelihood over all MultiNest samples."""
     data = Analyzer(len(self.sampled_parameters),
                     self._file_root,
                     verbose=False).get_data()
     # Column 1 of the MultiNest data stores -2*ln(L) per sample.
     return (-0.5 * data[:, 1]).max()
Code example #4
0
File: multinest.py  Project: blakeaw/Gleipnir
 def best_fit_likelihood(self):
     """Parameter vector with the maximum likelihood.
     Returns:
         numpy.array: The parameter vector.
     """
     data = Analyzer(len(self.sampled_parameters),
                     self._file_root,
                     verbose=False).get_data()
     # Column 1 is -2*ln(L); columns 2 onward hold the parameters.
     best_idx = np.argmax(-0.5 * data[:, 1])
     return data[best_idx][2:]
Code example #5
0
	plt.ylabel(r'$r \times \Delta\Sigma$')

	# Distinct color/style per dataset, cycled when there are more
	# datasets than entries.
	color_cycle = ['black', 'blue', 'green', 'red', 'brown']
	# NOTE(review): '-..' is not a standard matplotlib linestyle
	# ('-', '--', '-.', ':'); likely meant '-.' or ':' — confirm.
	style_cycle = ['--', '-.', ':', '-..']

	# One pass per MultiNest run; the four argument lists are expected to
	# be parallel (same length) — zip silently truncates otherwise.
	for i, (multinest_dir, inv_cov_file, cov_file, label) in enumerate(zip(args.multinest_dir, args.inv_cov, args.cov, args.label)):

		print(f"reading {multinest_dir}")

		line_color = color_cycle[np.mod(i, len(color_cycle))]
		line_style = style_cycle[np.mod(i, len(style_cycle))]

		## read samples
	
		# assumes the first 14 columns are the sampled parameters and the
		# remainder are posterior-predictive quantities — TODO confirm
		n_dims = 14
		a = Analyzer(n_dims, outputfiles_basename=multinest_dir)
	
		bestfit_params = a.get_best_fit()['parameters']
		bestfit_lnL = a.get_best_fit()['log_likelihood']
		# Posterior-predictive values attached after the sampled params.
		ppd_ml = np.array( bestfit_params[n_dims:] )
	
		# Last PPD entry is chi^2; the rest split evenly into wp and DS.
		ml_chisq = ppd_ml[-1]
		ml_wp, ml_ds = np.split(np.array(ppd_ml[:-1]), 2)
	
		multinest_equal_samples_all = a.get_equal_weighted_posterior()
		multinest_equal_samples = multinest_equal_samples_all[:, :n_dims]
		multinest_ppd = multinest_equal_samples_all[:, n_dims:-1]
	
	
		## compute PPD statistics
	
Code example #6
0
File: marginals_runfile.py  Project: mnt116/msci-21cm
# importing modules
from __future__ import absolute_import, unicode_literals, print_function
from pymultinest.solve import solve
from pymultinest.analyse import Analyzer
import os
import sys
import shutil
import plot
import matplotlib.pyplot as plt
try:
    os.mkdir('chains')
except OSError:
    pass

do_pretty = True

n_params = 8

prefix = 'multi_fg_1-'

an = Analyzer(n_params, outputfiles_basename=prefix)
for i in range(n_params):
    plt.figure()
    marge = plot.PlotMarginalModes(an)
    if do_pretty == True:
        out = marge.plot_modes_marginal(i, with_ellipses=False)
        plt.savefig("parameter%i_marginal.png" % i)
    else:
        out = marge.plot_conditional(i, with_ellipses=False)
        plt.savefig("posterior%i_marginal.png" % i)
Code example #7
0
# Configure the unit-cube -> parameter transform bounds and the chi^2
# baseline before sampling.
minimizer.set_cube(min_values, max_values)
minimizer.set_chi2_0()

# Make sure the directory with detailed results exists:
dir_out = "chains/"
if not os.path.exists(dir_out):
    os.mkdir(dir_out)
# Script name without extension, used as the output-file prefix.
# NOTE(review): str.split(".py") breaks if ".py" occurs elsewhere in the
# path; os.path.splitext would be safer — confirm before changing.
file_prefix = __file__.split(".py")[0]

# Run MultiNest:
run_kwargs = {
    'LogLikelihood': minimizer.ln_likelihood,
    'Prior': minimizer.transform_cube,
    'n_dims': len(parameters_to_fit),
    'outputfiles_basename': os.path.join(dir_out, file_prefix+"_"),
    'resume': False,
    'importance_nested_sampling': False,
    'multimodal': True}
result = solve(**run_kwargs)

# Analyze results - we print each mode separately and give log-evidence:
analyzer = Analyzer(n_params=run_kwargs['n_dims'],
                    outputfiles_basename=run_kwargs['outputfiles_basename'])
modes = analyzer.get_mode_stats()['modes']
# For each detected mode: mean +- sigma per parameter, then the mode's
# local log-evidence with its error.
for mode in modes:
    print("\nMode {:} parameters:".format(mode['index']))
    for (p, m, s) in zip(parameters_to_fit, mode['mean'], mode['sigma']):
        print("{:6} = {:.4f} +- {:.4f}".format(p, m, s))
    print("local log-evidence: {:.3f} +- {:.3f}".format(
            mode["local log-evidence"], mode["local log-evidence error"]))
Code example #8
0
            cpm_sources[i].pixel_time + 2450000.,
            MCPM_options['mask_model_epochs'])

# MN fit:
dir_out = os.path.dirname(MN_args['outputfiles_basename'])
# Guard against a bare basename: os.path.dirname() then returns '' and
# os.mkdir('') would raise FileNotFoundError.
if dir_out and not os.path.exists(dir_out):
    os.mkdir(dir_out)
minimizer.set_MN_cube(mn_min, mn_max)
# Suppress MultiNest's console chatter while sampling.
with redirect_stdout.stdout_redirect_2():
    result = solve(LogLikelihood=minimizer.ln_like,
                   Prior=minimizer.transform_MN_cube,
                   **MN_args)
minimizer.close_file_all_models()

# Analyze output:
analyzer = Analyzer(n_params=MN_args['n_dims'],
                    outputfiles_basename=MN_args['outputfiles_basename'],
                    verbose=False)
stats = analyzer.get_stats()
# Fixed typo in the user-facing message ("eveidence" -> "evidence").
msg = "Log-evidence: {:.4f} +- {:.4f}"
log_evidence = stats['nested sampling global log-evidence']
log_evidence_err = stats['nested sampling global log-evidence error']
print(msg.format(log_evidence, log_evidence_err))
print('parameter values:')
# Report the posterior median and the +/- 1-sigma offsets per parameter.
for (name, v) in zip(parameters_to_fit, stats['marginals']):
    median = v['median']
    sigmas = [v['1sigma'][1] - median, median - v['1sigma'][0]]
    print('{:7s} : {:.4f} {:.4f} {:.4f}'.format(name, median, *sigmas))
print('Best model:')
minimizer.print_min_chi2()
Code example #9
0
File: analyze.py  Project: mhieronymus/retro
from scipy import stats

from pymultinest.analyse import Analyzer

# Best-fit log-likelihoods and parameter vectors collected across runs.
llhs = []
bestfits = []

# NOTE(review): only referenced by the commented-out per-event paths below.
event = 2

# Scan up to 1000 MultiNest runs: a run counts as present when its log
# file exists; runs with missing/partial output files are skipped via
# the IOError handler below.
for i in range(1000):
    #if os.path.exists('/gpfs/scratch/pde3/retro/test_event%i/log/run_%04d.log'%(event,i)):
    if os.path.exists('/gpfs/scratch/pde3/retro/log_cscd/run_%04d.log' % (i)):
        try:
            #a = Analyzer(8, outputfiles_basename="/gpfs/scratch/pde3/retro/test_event%i/out/tol0.1_evt%i-"%(event,i))
            a = Analyzer(8,
                         outputfiles_basename=
                         "/gpfs/scratch/pde3/retro/out_cscd/tol0.1_evt%i-" %
                         (i))
            bestfit_params = a.get_best_fit()
            llhs.append(bestfit_params['log_likelihood'])
            bestfits.append(bestfit_params['parameters'])
        except IOError:
            # Log file exists but MultiNest output files do not; skip.
            pass

# assumes numpy was imported as np earlier in the file — TODO confirm
llhs = np.array(llhs)
bestfits = np.array(bestfits)

# Names/units of the 8 reconstructed parameters, in column order.
names = [
    'time', 'x', 'y', 'z', 'zenith', 'azimuth', 'energy', 'track_fraction'
]
units = ['ns', 'm', 'm', 'm', 'rad', 'rad', 'GeV', None]
#event 1: