Example #1
def main(args):
    ndim = args.x_dim
    sigma = args.sigma
    width = max(0, 1 - 5 * sigma)
    centers = (np.sin(np.arange(ndim) / 2.) * width + 1.) / 2.
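    # note: the sinusoidal centers above are overridden below with fixed centers at 0.5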
    centers = np.ones(ndim) * 0.5

    def flat_loglike(theta):
        like = -0.5 * (((theta - centers) / sigma)**2).sum() - 0.5 * log(
            2 * np.pi * sigma**2) * ndim
        return like

    def flat_transform(x):
        return x

    import string
    paramnames = list(string.ascii_lowercase)[:ndim]

    if args.pymultinest:
        from pymultinest.solve import solve

        result = solve(LogLikelihood=flat_loglike,
                       Prior=flat_transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        from ultranest.solvecompat import pymultinest_solve_compat as solve
        result = solve(LogLikelihood=flat_loglike,
                       Prior=flat_transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'RNS-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
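Both branches above work because solve() (and UltraNest's pymultinest_solve_compat) return a dict with 'logZ', 'logZerr' and 'samples'. A minimal self-contained sketch of the same pattern, with an assumed 2-d unit-Gaussian likelihood:

import os
import numpy as np
from pymultinest.solve import solve

os.makedirs('chains', exist_ok=True)

def demo_loglike(theta):
    # standard normal log-likelihood (illustrative assumption)
    return -0.5 * (theta**2).sum() - 0.5 * len(theta) * np.log(2 * np.pi)

def demo_transform(cube):
    # stretch the unit cube to a uniform prior on [-5, 5]
    return cube * 10 - 5

result = solve(LogLikelihood=demo_loglike, Prior=demo_transform, n_dims=2,
               outputfiles_basename='chains/demo-', verbose=False)
print(result['logZ'], result['logZerr'], result['samples'].shape)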
Example #2
def run(model_loc, output_dir, resume):

    with open(model_loc, 'rb') as f:
        model = pickle.load(f)

    if not resume:
        if os.path.isdir(output_dir):
            shutil.rmtree(output_dir)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    observed_w1 = 25.5

    # run MultiNest
    start_time = datetime.datetime.now()
    result = solve(
        LogLikelihood=lambda x: gaussian_loglikelihood(model, observed_w1, x),
        Prior=lambda x: scale_parameters(x, model.bounds),
        n_dims=len(model.parameters()),
        outputfiles_basename='out/',
        resume=resume,
        verbose=False,
        max_iter=0,
        importance_nested_sampling=True,
        sampling_efficiency='parameter')
    end_time = datetime.datetime.now()
    print('time', end_time - start_time)

    # save parameter names
    json.dump(model.parameters(),
              open(os.path.join(output_dir, 'params.json'), 'w'))
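gaussian_loglikelihood and scale_parameters are defined elsewhere in this project; a plausible scale_parameters, assuming model.bounds is a sequence of (min, max) pairs, simply rescales the unit cube:

import numpy as np

def scale_parameters(cube, bounds):
    # hypothetical helper: map unit-cube samples onto the model bounds
    lo = np.array([b[0] for b in bounds])
    hi = np.array([b[1] for b in bounds])
    return lo + cube * (hi - lo)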
Example #3
def test():
    def myprior(cube):
        return cube * 10 * pi

    def myloglike(cube):
        chi = (cos(cube / 2.)).prod()
        return (2. + chi)**5

    # number of dimensions our problem has
    parameters = ["x", "y"]
    n_params = len(parameters)

    # run MultiNest
    result = solve(LogLikelihood=myloglike,
                   Prior=myprior,
                   n_dims=n_params,
                   outputfiles_basename="chains/3-")

    with open('%sparams.json' % "chains/3-", 'w') as f:
        json.dump(parameters, f, indent=2)

    print()
    print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
    print()
    print('parameter values:')
    for name, col in zip(parameters, result['samples'].transpose()):
        print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    assert numpy.isclose(result['logZ'], 236.0, atol=1), result['logZ']
    assert numpy.isclose(result['logZerr'], 0.1, atol=1), result['logZerr']
Example #4
    def run_multinest(num_live_points):
        ### multinest
        try:
            from pymultinest.solve import solve
        except ImportError:
            raise ImportError("Multinest is not installed.\nFollow directions on http://johannesbuchner.github.io/PyMultiNest/install.html.")
        import os
        os.makedirs('chains', exist_ok=True)
        prefix = "chains/3-"
        parameters = ['w', "x", "y", 'z']

        def loglikelihood(theta):
            """Multivariate normal log-likelihood."""
            nDims = len(theta)
            r2 = sum(theta**2)
            logL = -log(2*pi*sigma*sigma)*nDims/2.0
            logL += -r2/2/sigma/sigma
            return logL

        # prior transform (iid standard normal prior)
        def prior_transform(u):
            """Transforms our unit cube samples `u` to a standard normal prior."""
            return u*2.-1.

        # run MultiNest
        t0 = default_timer()
        result = solve(LogLikelihood=loglikelihood, Prior=prior_transform,
            n_dims=ndims, outputfiles_basename=prefix, verbose=False,
                       n_live_points=num_live_points)
        run_time = default_timer() - t0
        print("Multinest results:", result)
        return run_time
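run_multinest relies on names defined at module level that the snippet does not show; a sketch of the assumed setup (values are illustrative):

from math import log, pi
from timeit import default_timer
import numpy as np

sigma = 0.1  # assumed width of the Gaussian likelihood
ndims = 4    # assumed dimensionality; 'parameters' above lists four names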
Example #5
 def solve_multinest(self, create_analyzer=True):
     """
     Run pymultinest.solve, saving chain and parameter values
     """
     parameters = self.model.labels
     n_params = len(parameters)
     result = solve(LogLikelihood=self.loglike, Prior=self.priors, n_dims=n_params, outputfiles_basename=self.prefix, verbose=True)
     print()
     print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
     print()
     print('parameter values:')
     self.params = []
     for name, col in zip(parameters, result['samples'].transpose()):
         self.params.append(col.mean())
         print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
         
     # make marginal plots by running:
     # $ python multinest_marginals.py chains/3-
     # For that, we need to store the parameter names:
     import json
     if not os.path.exists(self.prefix):
         os.mkdir(self.prefix)
     else:
         # start fresh: clear any previous output under the prefix directory
         shutil.rmtree(self.prefix)
         os.mkdir(self.prefix)
     with open(os.path.join(self.prefix, '%sparams.json' % self.prefix), 'w') as f:
         json.dump(parameters, f, indent=2)
     for i in os.listdir('.'):
         if i.startswith(self.prefix):
             if os.path.isfile(i):
                 shutil.move(i,self.prefix+"/")
     if create_analyzer:
         self.analyzer = Analyzer(n_params, outputfiles_basename=self.prefix)
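With the output files moved under the prefix, the Analyzer exposes the evidence and posterior; a brief usage sketch using the same stats keys as Example #20 below (values are illustrative):

from pymultinest.analyse import Analyzer

analyzer = Analyzer(n_params=4, outputfiles_basename='chains/')
stats = analyzer.get_stats()
print(stats['nested sampling global log-evidence'],
      stats['nested sampling global log-evidence error'])
posterior = analyzer.get_equal_weighted_posterior()  # (nsamples, n_params + 1) array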
Example #7
 def run(self, verbose=False):
     """Initiate the MultiNest Nested Sampling run."""
     output = solve(LogLikelihood=self.loglikelihood,
                    Prior=self._prior,
                    n_dims=self._nDims,
                    n_live_points=self.population_size,
                    outputfiles_basename=self._file_root,
                    verbose=verbose,
                    **self.multinest_kwargs)
     self._output = output
     return self.log_evidence, self.log_evidence_error
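The log_evidence accessors returned above are not shown; presumably they read the stored solve() output, e.g. (a sketch, not the source's implementation):

@property
def log_evidence(self):
    # 'logZ' is the global log-evidence key in the dict returned by solve()
    return self._output['logZ']

@property
def log_evidence_error(self):
    return self._output['logZerr']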
Example #8
def test_2():
    """testing n_params != n_dims case"""
    def myprior(cube):
        test_2.prior_dim = cube.shape
        cube[-1] = numpy.sum(cube[:-1])
        return cube * 10 * pi

    def myloglike(cube):
        test_2.params_dim = cube.shape
        chi = (cos(cube / 2.)).prod()
        return (2. + chi)**5

    test_2.prior_dim = None
    test_2.params_dim = None

    # number of dimensions our problem has
    parameters = ["x", "y"]
    n_dims = len(parameters)
    n_params = n_dims + 1

    # run MultiNest
    result = solve(LogLikelihood=myloglike,
                   Prior=myprior,
                   n_dims=n_dims,
                   n_params=n_params,
                   outputfiles_basename="chains/5-")

    with open('%sparams.json' % "chains/5-", 'w') as f:
        json.dump(parameters, f, indent=2)

    parameters += ['x+y']
    print()
    print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
    print()
    print('parameter values:')
    for name, col in zip(parameters, result['samples'].transpose()):
        print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    assert test_2.prior_dim == (n_params, )
    assert test_2.params_dim == (n_dims, )

    assert numpy.isclose(result['logZ'], 236.0, atol=1), result['logZ']
    assert numpy.isclose(result['logZerr'], 0.1, atol=1), result['logZerr']
Example #9
File: model.py Project: dioph/dwelf
    def multinest(self,
                  sampling_efficiency=.01,
                  const_efficiency_mode=True,
                  n_live_points=4000,
                  **kwargs):
        """Runs MultiNest to sample the posterior distribution

        Parameters
        ----------
        sampling_efficiency: defines the sampling efficiency (default=0.01)
        const_efficiency_mode: whether to run in constant efficiency mode (default=True)
        n_live_points: number of live points (default=4000)
        **kwargs:
            importance_nested_sampling: whether to activate INS (default=True)
            evidence_tolerance: evidence tolerance factor (default=0.5)
            outputfiles_basename: root for MultiNest output files (default="chains/1-")
            verbose: updates on sampling progress (default=False)

        Returns
        -------
        results: data returned by pymultinest.solve.solve
        """
        def prior(cube):
            return cube

        def logl(cube):
            theta = self.sample(cube)
            n = self.t.size
            c = -.5 * n * np.log(2 * np.pi) - .5 * np.log(self.dy).sum()
            return c - .5 * self.chi(theta)

        results = solve(LogLikelihood=logl,
                        Prior=prior,
                        n_dims=self.ndim,
                        sampling_efficiency=sampling_efficiency,
                        const_efficiency_mode=const_efficiency_mode,
                        n_live_points=n_live_points,
                        **kwargs)
        return results
Example #10
def sampler_multinest(D):

    st = time.time()

    if not os.path.exists("chains"):
        os.mkdir("chains")

    prefix = "chains/" + D['outname'] + "-"

    # number of live points to maintain in the multinest sampler
    D['nlive'] = 800

    def L(param):
        likelihood = D['likelihood']
        return likelihood(param, D)

    def P(cube):
        return prior_multinest(cube, D)

    result = solve(LogLikelihood=L,
                   Prior=P,
                   n_dims=D['nparam'],
                   n_live_points=D['nlive'],
                   outputfiles_basename=prefix,
                   sampling_efficiency=0.8,
                   evidence_tolerance=0.5,
                   resume=False)

    D['lnZ'] = result['logZ']
    D['dlnZ'] = result['logZerr']
    D['rawtrace'] = result['samples']
    print(D['rawtrace'].shape)

    D['sampling_time'] = np.round(time.time() - st, 2)

    return D
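prior_multinest is defined elsewhere; a hypothetical version consistent with the wrapper above, assuming D stores per-parameter bounds under 'min'/'max' keys:

import numpy as np

def prior_multinest(cube, D):
    # hypothetical: rescale unit-cube samples onto the stored parameter bounds
    lo = np.asarray(D['min'])
    hi = np.asarray(D['max'])
    return lo + cube * (hi - lo)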
Example #11
def main(args):
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    def loglike(theta):
        a = theta[:, :-1]
        b = theta[:, 1:]
        return -2 * (100 * (b - a**2)**2 + (1 - a)**2).sum(axis=1)

    def transform(u):
        return u * 20 - 10

    def transform_loglike_gradient(u):
        theta = u * 20 - 10

        a = theta[:-1]
        b = theta[1:]
        grad = theta.copy()
        L = -2 * (100 * (b - a**2)**2 + (1 - a)**2).sum()
        for i in range(ndim):
            a = theta[i]
            if i < ndim - 1:
                b = theta[i + 1]
                grad[i] = -2 * (-400 * a * (b - a**2) - 2 * (1 - a))
            if i > 0:
                c = theta[i - 1]
                grad[i] += -400 * (a - c**2)

        prior_factor = 20

        return theta, L, grad * prior_factor

    def gradient(u):
        theta = u * 20 - 10

        grad = theta.copy()
        for i in range(ndim):
            a = theta[i]
            if i < ndim - 1:
                b = theta[i + 1]
                grad[i] = -2 * (-400 * a * (b - a**2) - 2 * (1 - a))
            if i > 0:
                c = theta[i - 1]
                grad[i] += -400 * (a - c**2)

        prior_factor = 20

        return grad * prior_factor

    paramnames = ['param%d' % (i + 1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike,
                       Prior=transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim,
                                                          args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim,
                                                         args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim,
                                                          args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim,
                                                           args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=log_dir,
                                        resume=True,
                                        vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.dyhmc:
            import ultranest.dyhmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim,
                            transform,
                            loglike,
                            transform_loglike_gradient,
                            combination=True)
            sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(
                ndim=ndim,
                nsteps=args.slice_steps,
                transform_loglike_gradient=transform_loglike_gradient,
                adaptive_nsteps=adaptive_nsteps)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(
                ndim=ndim,
                nsteps=args.slice_steps,
                transform=transform,
                loglike=loglike,
                gradient=gradient,
                adaptive_nsteps=adaptive_nsteps)
        sampler.run(frac_remain=0.5,
                    min_num_live_points=args.num_live_points,
                    max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename=log_dir +
                                     '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim,
                                resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
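Note the pattern used here and in later examples: the likelihood is written vectorized (theta has shape (nbatch, ndim)) for UltraNest, while pymultinest passes a single 1-D vector and therefore gets a thin wrapper; a sketch returning an explicit scalar:

def flat_loglike(theta):
    # evaluate the vectorized likelihood on a batch of one point
    return float(loglike(theta.reshape((1, -1))))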
Example #12
from Likelihoods.nest_plik import planck

from pymultinest.solve import Solver
from pymultinest.analyse import Analyzer
from pymultinest.solve import solve
from pymultinest.watch import ProgressPlotter
basename = '/home/kmaylor/int_neutrinos/Multinest_output/Plik/plik'
p = planck()
ProgressPlotter(8, interval_ms=20, outputfiles_basename=basename)
solution = solve(LogLikelihood=p.LogLikelihood,
                 Prior=p.Prior,
                 n_dims=8,
                 n_params=8,
                 n_clustering_params=1,
                 outputfiles_basename=basename,
                 verbose=True,
                 n_live_points=400,
                 max_modes=2,
                 evidence_tolerance=.1)

print(solution)
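As written, the ProgressPlotter instance is created and immediately discarded; it is a watcher that must be started before the run and stopped afterwards to have any effect (the commented-out pattern in Example #29 shows the same idea):

progress = ProgressPlotter(8, interval_ms=20, outputfiles_basename=basename)
progress.start()
# ... run solve(...) as above ...
progress.stop()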
Example #13
R = gaussian_kernel(x, x, ls)
ydata = df.loc[x, '2':'5'].values.T
# print(ydata)


# number of dimensions our problem has
parameters = ["mu2", "sigma2", "mu3", "sigma3",
              "mu4", "sigma4", "mu5", "sigma5"]
n_params = len(parameters)
# name of the output files
prefix = "chains/not_iid-"
datafile = prefix + datafile + "_"

# run MultiNest
result = solve(
    LogLikelihood=loglike, Prior=prior,
    n_dims=n_params, outputfiles_basename=datafile, resume=False, verbose=True)
# json.dump(parameters, open(datafile + 'params.json', 'w'))  # save parameter names

# # plot the distribution of a posteriori possible models
# fig = plt.figure()
# cax = fig.add_subplot(131)
# # plt.plot(x, ydata.T, '+ ', color='red', label='data')
# a = pymultinest.Analyzer(outputfiles_basename=datafile, n_params=n_params)
# colors = ['orange', 'green', 'blue', 'red']
# mulist, siglist = zip(*a.get_equal_weighted_posterior()[::, :-1])
# for (mu, sigma) in a.get_equal_weighted_posterior()[::100, :-1]:
#     for i, dX in enumerate(ydata):
#         n = i + 2
#         plt.plot(x, dX/Q**n, '-', color=colors[i],
#                  alpha=0.2, label='c{}'.format(n))
Example #14
minimizer.set_cube(min_values, max_values)
minimizer.set_chi2_0()

# Make sure the directory with detailed results exists:
dir_out = "chains/"
if not os.path.exists(dir_out):
    os.mkdir(dir_out)
file_prefix = __file__.split(".py")[0]

# Run MultiNest:
run_kwargs = {
    'LogLikelihood': minimizer.ln_likelihood,
    'Prior': minimizer.transform_cube,
    'n_dims': len(parameters_to_fit),
    'outputfiles_basename': os.path.join(dir_out, file_prefix+"_"),
    'resume': False,
    'importance_nested_sampling': False,
    'multimodal': True}
result = solve(**run_kwargs)

# Analyze results - we print each mode separately and give log-evidence:
analyzer = Analyzer(n_params=run_kwargs['n_dims'],
                    outputfiles_basename=run_kwargs['outputfiles_basename'])
modes = analyzer.get_mode_stats()['modes']
for mode in modes:
    print("\nMode {:} parameters:".format(mode['index']))
    for (p, m, s) in zip(parameters_to_fit, mode['mean'], mode['sigma']):
        print("{:6} = {:.4f} +- {:.4f}".format(p, m, s))
    print("local log-evidence: {:.3f} +- {:.3f}".format(
            mode["local log-evidence"], mode["local log-evidence error"]))
Example #15
    'u_0': u_0,
    't_E': t_E,
    'pi_E_N': pi_E_N,
    'pi_E_E': pi_E_E,
    't_0_par': t_0_par
})
event = MM.Event(datasets=datasets, model=model)
minimizer = Minimizer(event, parameters_to_fit, cpm_source)
minimizer.file_all_models = file_all_prefix
minimizer.set_chi2_0(np.sum([len(d.time) for d in datasets]))

# MultiNest fit:
if not os.path.exists(dir_out):
    os.mkdir(dir_out)
minimizer.set_MN_cube(mn_min, mn_max)
result_2 = solve(LogLikelihood=minimizer.ln_like,
                 Prior=minimizer.transform_MN_cube,
                 n_dims=n_params,
                 max_iter=max_iter,
                 n_clustering_params=n_modes,
                 n_live_points=n_live_points,
                 outputfiles_basename=dir_out + file_prefix,
                 resume=False)
print('parameter values:')
for name, col in zip(parameters_to_fit, result_2['samples'].transpose()):
    print('{:10s} : {:.4f} +- {:.4f}'.format(name, col.mean(), col.std()))
minimizer.print_min_chi2()
print()
minimizer.reset_min_chi2()
minimizer.close_file_all_models()
Example #16
    return mxlik


# This code both runs PyMultiNest and generates forest plots
# To run PyMultiNest uncomment the below block while commenting the plotting block

#--------------------------running multinest------------------
nlive = par.nlive  # Number of live points
tol = 0.3

parameters = [
    r'$\theta_{j}$', r'$log n_{0}$', 'p', r'$log \epsilon_{B}$',
    r'$log \epsilon_{E}$', r'$log Ek$'
]
#parameters=['s',r'$p_{FS}$',r'$\lambda$',r'$\iota$',r'$log\/\nu_m$',r'$log\/\nu_c$',r'$log\/f_m$',r'$log\/\tau_m$']

if not os.path.exists(grb):
    os.makedirs(grb)
solve(loglike,
      prior,
      n_dims=ndim,
      outputfiles_basename=grb + '/' + datafile + '_',
      resume=True,
      verbose=False,
      n_live_points=nlive,
      sampling_efficiency=0.3)
json.dump(parameters, open(grb + '/' + datafile + '_params.json',
                           'w'))  # save parameter names

#-----------------------multinest OVER--------------------------
Example #17
def main(args):
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    rv1a = scipy.stats.loggamma(1, loc=2. / 3, scale=1. / 30)
    rv1b = scipy.stats.loggamma(1, loc=1. / 3, scale=1. / 30)
    rv2a = scipy.stats.norm(2. / 3, 1. / 30)
    rv2b = scipy.stats.norm(1. / 3, 1. / 30)
    rv_rest = []
    for i in range(2, ndim):
        if i <= (ndim + 2) / 2:
            rv = scipy.stats.loggamma(1, loc=2. / 3., scale=1. / 30)
        else:
            rv = scipy.stats.norm(2. / 3, 1. / 30)
        rv_rest.append(rv)
        del rv

    def loglike(theta):
        L1 = log(0.5 * rv1a.pdf(theta[:, 0]) + 0.5 * rv1b.pdf(theta[:, 0]) +
                 1e-300)
        L2 = log(0.5 * rv2a.pdf(theta[:, 1]) + 0.5 * rv2b.pdf(theta[:, 1]) +
                 1e-300)
        Lrest = np.sum(
            [rv.logpdf(t) for rv, t in zip(rv_rest, theta[:, 2:].transpose())],
            axis=0)
        #assert L1.shape == (len(theta),)
        #assert L2.shape == (len(theta),)
        #assert Lrest.shape == (len(theta),), Lrest.shape
        like = L1 + L2 + Lrest
        like = np.where(like < -1e300,
                        -1e300 - ((np.asarray(theta) - 0.5)**2).sum(), like)
        assert like.shape == (len(theta), ), (like.shape, theta.shape)
        return like

    def transform(x):
        return x

    paramnames = ['param%d' % (i + 1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike,
                       Prior=transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim,
                                                          args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim,
                                                         args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim,
                                                          args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim,
                                                           args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=log_dir,
                                        resume=True,
                                        vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w')
                if sampler.mpi_rank == 0 else False)
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w')
                if sampler.mpi_rank == 0 else False)
        #if args.dyhmc:
        #    import ultranest.dyhmc
        #    verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
        #    sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps,
        #        transform_loglike_gradient=transform_loglike_gradient, adaptive_nsteps=adaptive_nsteps)
        #if args.dychmc:
        #    import ultranest.dychmc
        #    verify_gradient(ndim, transform, loglike, gradient)
        #    sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps,
        #        transform=transform, loglike=loglike, gradient=gradient, adaptive_nsteps=adaptive_nsteps)
        sampler.run(frac_remain=0.5,
                    min_num_live_points=args.num_live_points,
                    max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename=log_dir +
                                     '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim,
                                resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
Example #18
    f.write(f"# Ran with evidence_tolerance = {tol}, n_live_points = {pts}\n")
    for i, (name, tex) in enumerate(params.items()):
        f.write(f"lab{i+1}={tex}\n")
        
MPI_PROCESSES = MPI.COMM_WORLD.Get_size()
# Only print status updates on one process if using MPI
if MPI_PROCESSES == 1:
    print("Not using MPI")
    OUTPUT = True
else:
    if MPI.COMM_WORLD.Get_rank() == 0:
        print(f"Using MPI with {MPI_PROCESSES} processes")
        OUTPUT = True
    else:
        OUTPUT = False

if OUTPUT:
    print("Started at", datetime.datetime.now().astimezone().replace(microsecond=0).isoformat())
    print(f"Running with evidence_tolerance = {tol}, n_live_points = {pts}")

# run MultiNest
result = solve(LogLikelihood=my_loglikelihood, Prior=my_prior, n_dims=n_params,
               evidence_tolerance=tol, n_live_points = pts,
               outputfiles_basename=prefix, verbose=True)

if OUTPUT:
    print('parameter values:')
    for name, col in zip(params.keys(), result['samples'].transpose()):
        print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    print("Finished at", datetime.datetime.now().astimezone().replace(microsecond=0).isoformat())
Example #19
def main(args):
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    def flat_loglike(theta):
        delta = np.max(np.abs(theta - 0.5))
        volume_enclosed = ndim * np.log(delta + 1e-15)
        if volume_enclosed > -100:
            return -volume_enclosed
        else:
            return +100

    def loglike(theta):
        delta = np.max(np.abs(theta - 0.5), axis=1)
        volume_enclosed = ndim * np.log(delta + 1e-15)
        like = -volume_enclosed
        like[~(like < +100)] = 100
        return like

    def flat_transform(x):
        return x
    def transform(x):
        return x
    
    paramnames = ['param%d' % (i+1) for i in range(ndim)]
    
    if args.pymultinest:
        from pymultinest.solve import solve
        
        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform, 
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            verbose=True, resume=True, importance_nested_sampling=False)
        
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    
    elif args.reactive:
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)
        
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform, 
            log_dir=log_dir, resume=True,
            vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        """
        if args.dyhmc:
            import ultranest.dyhmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
            sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps, 
                transform_loglike_gradient=transform_loglike_gradient)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient, verbose=True)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps, 
                transform=transform, loglike=loglike, gradient=gradient)
        """
        sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform, 
            num_live_points=args.num_live_points, vectorized=True,
            log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
Example #20
        minimizer.fit_blending[ind] = False

if 'mask_model_epochs' in MCPM_options:
    for i in range(minimizer.n_sat):
        minimizer.model_masks[i] = utils.mask_nearest_epochs(
            cpm_sources[i].pixel_time + 2450000.,
            MCPM_options['mask_model_epochs'])

# MN fit:
dir_out = os.path.dirname(MN_args['outputfiles_basename'])
if not os.path.exists(dir_out):
    os.mkdir(dir_out)
minimizer.set_MN_cube(mn_min, mn_max)
with redirect_stdout.stdout_redirect_2():
    result = solve(LogLikelihood=minimizer.ln_like,
                   Prior=minimizer.transform_MN_cube,
                   **MN_args)
minimizer.close_file_all_models()

# Analyze output:
analyzer = Analyzer(n_params=MN_args['n_dims'],
                    outputfiles_basename=MN_args['outputfiles_basename'],
                    verbose=False)
stats = analyzer.get_stats()
msg = "Log-evidence: {:.4f} +- {:.4f}"
log_evidence = stats['nested sampling global log-evidence']
log_evidence_err = stats['nested sampling global log-evidence error']
print(msg.format(log_evidence, log_evidence_err))
print('parameter values:')
for (name, v) in zip(parameters_to_fit, stats['marginals']):
    median = v['median']
Example #21
# Script to test MultiNest when running with a long file name (> than 100 characters)

if __name__ == '__main__':
    # avoid running in pytest
    from pymultinest.solve import solve

    solve(
        lambda cube: -0.5 * (cube**2).sum(),
        lambda cube: cube,
        2,
        resume=True,
        verbose=True,
        outputfiles_basename=
        "a_really_really_really_really_really_really_really_really_really_really_really_really_really_really_really_really_really_long_name-"
    )
Example #22
def run(outputDirectory,
        outputPrefix,
        ranges,
        parameters,
        loglikelihood,
        log_zero=-1e100,
        n_live_points=400):
    '''
	Wrapper for PyMultiNest. The arguments are:

		outputDirectory	-	The directory in which to place output files.
		outputPrefix	-	The prefix for all output file names.
		ranges			-	The ranges of all parameters. This is a list of tuples, each of which
							is of the form (min, max).
		parameters		-	The names of all parameters. This is a list whose entries correspond to those
							in ranges.
		loglikelihood 	-	A function which returns the log likelihood value for the model given parameter
							values.
		log_zero		-	Log-likelihood value below which MultiNest ignores a point.
		n_live_points	-	The number of live points to use while sampling.

	There is no return value, as all output is written to files.
	'''

    prefix = outputDirectory + '/' + outputPrefix

    if not os.path.exists(outputDirectory) and MPI.COMM_WORLD.Get_rank() == 0:
        os.makedirs(outputDirectory)

    ndim = len(ranges)

    def pri(cube):
        return Prior(cube, ranges)

    result = solve(LogLikelihood=loglikelihood,
                   Prior=pri,
                   n_dims=ndim,
                   importance_nested_sampling=False,
                   resume=False,
                   outputfiles_basename=prefix,
                   verbose=True,
                   n_live_points=n_live_points,
                   log_zero=log_zero)

    print()
    print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
    print()
    print('parameter values:')

    for name, col in zip(parameters, result['samples'].transpose()):
        print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    if MPI.COMM_WORLD.Get_rank() == 0:
        with open('%sparams.json' % prefix, 'w') as f:
            json.dump(parameters, f, indent=2)

        print(prefix)
        parameters = json.load(open(prefix + 'params.json'))
        n_params = len(parameters)

        a = pymultinest.Analyzer(n_params=n_params,
                                 outputfiles_basename=prefix)
        s = a.get_stats()
        json.dump(s, open(prefix + 'stats.json', 'w'), indent=4)
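The Prior helper wrapped by pri() is not shown; a plausible implementation rescales each unit-cube coordinate onto its (min, max) range:

def Prior(cube, ranges):
    # hypothetical helper matching the wrapper above
    for i, (lo, hi) in enumerate(ranges):
        cube[i] = lo + cube[i] * (hi - lo)
    return cube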
Example #23
def model(x, s, w, p, phase):
    xwrapped = numpy.fmod(x / p - phase + 2, 1)
    return (1 - s) + s * scipy.stats.norm.pdf(xwrapped, 0.5, w / p)


def loglikelihood(params):
    s, w, p, phase = params
    prob = model(samples, s, w, p, phase)
    loglike = numpy.log(prob.mean(axis=1) + 1e-300).sum()
    print('Like: %.1f' % loglike, params)
    return loglike


sol = solve(loglikelihood,
            transform,
            n_dims=len(parameter_names),
            outputfiles_basename=prefix + '_gauss')
json.dump(parameter_names, open(prefix + '_gaussparams.json', 'w'))
print('Evidence log Z = %.1f +- %.1f' % (sol['logZ'], sol['logZerr']))

x = numpy.linspace(5, 300, 4000)
plt.subplot(2, 1, 1)
plt.errorbar(x=data[:, 0],
             xerr=data[:, 1],
             y=numpy.random.normal(size=len(data)) + 0.5,
             linestyle=' ')
plt.xlim(x[0], x[-1])
plt.subplot(2, 1, 2)
plt.xlim(x[0], x[-1])
from posterierr.quantileshades import Shade
shade = Shade(x)
Example #24
def myprior(cube):
	return cube * 10 * pi

def myloglike(cube):
	chi = (cos(cube / 2.)).prod()
	return (2. + chi)**5

# number of dimensions our problem has
parameters = ["x", "y"]
n_params = len(parameters)
# name of the output files
prefix = "chains/3-"

# run MultiNest
result = solve(LogLikelihood=myloglike, Prior=myprior, 
	n_dims=n_params, outputfiles_basename=prefix, verbose=True)

print()
print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
for name, col in zip(parameters, result['samples'].transpose()):
	print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

# make marginal plots by running:
# $ python multinest_marginals.py chains/3-
# For that, we need to store the parameter names:
import json
with open('%sparams.json' % prefix, 'w') as f:
	json.dump(parameters, f, indent=2)
Example #25
    ppFS, lgeps_e, lgeps_B, thetaJ, lgE52, lgdens, Eta = parms
    parmsin = np.array(
        [ppFS, lgeps_e, lgeps_B, ppRS, lgRB, thetaJ, lgE52, lgdens, Eta])

    Fnutot = np.ones(len(tsec))

    for i in range(len(tsec)):
        FnuFS[i], FnuRS[i] = prog.aglightcurve(np.log10(nuobs[i]), tsec[i],
                                               parmsin)
        Fnutot[i] = FnuFS[i] + FnuRS[i]
    chi2 = (((Fnutot - fdata) / erdata)**2).sum()
    mxlik = -0.5 * chi2 - 0.5 * sum(np.log(2. * np.pi * erdata**2))
    return mxlik


'''
#--------------------------multinest------------------

nlive=1000
tol=0.3

#ppFS,lgeps_e,lgeps_B,thetaJ,lgE52,lgdens,Eta = parms
parameters=[r'$p_{FS}$',r'$log\/\epsilon_e$',r'$log\/\epsilon_B$',r'$\theta_j$',r'$log\/E_{52}$',r'$log\/n_0$',r'$\eta$']



solve(loglike, prior, n_dims=ndim, outputfiles_basename=datafile + '_', resume = True, verbose = False,n_live_points=nlive,sampling_efficiency=0.3)
json.dump(parameters, open(datafile + '_params.json', 'w')) # save parameter names

#-----------------------multinest OVER--------------------------
'''
Example #26
	return params
parameter_names = ['strength', 'width', 'period', 'phase']


def model(x, s, w, p, phase):
	xwrapped = numpy.fmod(x / p - phase + 2, 1)
	return (1 - s) + s * scipy.stats.norm.pdf(xwrapped, 0.5, w / p)

def loglikelihood(params):
	s, w, p, phase = params
	prob = model(samples, s, w, p, phase)
	loglike = numpy.log(prob.mean(axis=1) + 1e-300).sum()
	print('Like: %.1f' % loglike, params)
	return loglike

sol = solve(loglikelihood, transform, n_dims=len(parameter_names),
	outputfiles_basename=prefix + '_gauss')
json.dump(parameter_names, open(prefix + '_gaussparams.json', 'w'))
print('Evidence log Z = %.1f +- %.1f' % (sol['logZ'], sol['logZerr']))


x = numpy.linspace(5, 300, 4000)
plt.subplot(2, 1, 1)
plt.errorbar(x=data[:,0], xerr=data[:,1], y=numpy.random.normal(size=len(data)) + 0.5, linestyle=' ')
plt.xlim(x[0], x[-1])
plt.subplot(2, 1, 2)
plt.xlim(x[0], x[-1])
from posterierr.quantileshades import Shade
shade = Shade(x)
for s, w, p, phase in sol['samples'][:40]:
	s = 1
	y = model(x, s, w, p, phase)
Example #27
if not os.path.exists("chains"): os.mkdir("chains")

# probability function, taken from the eggbox problem.

def myprior(cube):
    return cube * 10 * pi

def myloglike(cube):
    chi = (cos(cube / 2.)).prod()
    return (2. + chi)**5


# In[ ]:

# number of dimensions our problem has
parameters = ["x", "y"]
n_params = len(parameters)

# run MultiNest
result = solve(LogLikelihood=myloglike, Prior=myprior, 
    n_dims=n_params, outputfiles_basename="chains/3-")

print()
print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
for name, col in zip(parameters, result['samples'].transpose()):
    print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

Example #28
def main(args):
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    
    #C = 0.01
    r = 0.2
    # the shell thickness is 
    #w = (r**(ndim+1) + C * scipy.special.gamma((ndim+3)/2)*ndim*pi**(-(ndim+1)/2) / (
    #        scipy.special.gamma((ndim+2)/2) * pi**(-ndim/2)))**(1 / (ndim+1)) - r
    w = 0.001 / ndim
    
    r1, r2 = r, r
    w1, w2 = w, w
    c1, c2 = np.zeros(ndim) + 0.5, np.zeros(ndim) + 0.5
    c1[0] -= r1 / 2
    c2[0] += r2 / 2
    N1 = -0.5 * log(2 * pi * w1**2)
    N2 = -0.5 * log(2 * pi * w2**2)
    Z_analytic = log(shell_vol(ndim, r1, w1) + shell_vol(ndim, r2, w2))
    
    def loglike(theta):
        d1 = ((theta - c1)**2).sum(axis=1)**0.5
        d2 = ((theta - c2)**2).sum(axis=1)**0.5
        L1 = -0.5 * ((d1 - r1)**2) / w1**2 + N1
        L2 = -0.5 * ((d2 - r2)**2) / w2**2 + N2
        return np.logaddexp(L1, L2)

    def transform(x):
        return x
    
    def gradient(theta):
        delta1 = theta - c1
        delta2 = theta - c2
        d1 = (delta1**2).sum()**0.5
        d2 = (delta2**2).sum()**0.5
        g1 = -delta1 * (1 - r1 / d1) / w1**2
        g2 = -delta2 * (1 - r2 / d2) / w2**2
        return np.logaddexp(g1, g2)
    
    
    """
    N = 10000
    x = np.random.normal(size=(N, ndim))
    x *= (np.random.uniform(size=N)**(1./ndim) / (x**2).sum(axis=1)**0.5).reshape((-1, 1))
    x = x * r1 + c1
    print(loglike(x) - N1)
    print('%.3f%%' % ((loglike(x) - N1 > -ndim*2).mean() * 100))
    
    import sys; sys.exit()
    """
    
    paramnames = ['param%d' % (i+1) for i in range(ndim)]
    
    if args.pymultinest:
        from pymultinest.solve import solve
        
        def flat_loglike(theta):
            return loglike(theta.reshape((1, -1)))
        
        result = solve(LogLikelihood=flat_loglike, Prior=transform, 
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            verbose=True, resume=True, importance_nested_sampling=False)
        
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    
    elif args.reactive:
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)
        
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform, 
            log_dir=log_dir, resume=True,
            vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        #if args.dyhmc:
        #    import ultranest.dyhmc
        #    from ultranest.utils import verify_gradient
        #    verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
        #    sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps, 
        #        transform_loglike_gradient=transform_loglike_gradient)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient, verbose=True)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps, 
                transform=transform, loglike=loglike, gradient=gradient)
        sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform, 
            num_live_points=args.num_live_points, vectorized=True,
            log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
    print("expected Z=%.3f (analytic solution)" % Z_analytic)
Example #29
def main():
    dtype = "float64"
    w_bkg = 0.768331
    # set_gpu_mem_growth()
    tf.keras.backend.set_floatx(dtype)
    # open Resonances list as dict
    config_list = load_config_file("Resonances")

    data, bg, mcdata = prepare_data(dtype=dtype)  # ,model="4")

    amp = AllAmplitude(config_list)
    a = Cache_Model(amp, w_bkg, data, mcdata, bg=bg,
                    batch=65000)  # ,constrain={"Zc_4160_g0:0":(0.1,0.1)})
    try:
        with open("final_params.json") as f:
            param = json.load(f)
            # print("using init_params.json")
            if "value" in param:
                a.set_params(param["value"])
            else:
                a.set_params(param)
    except (OSError, ValueError):  # no usable final_params.json
        pass
    # print(a.Amp(data))
    # exit()
    # pprint(a.get_params())
    # print(data,bg,mcdata)
    # t = time.time()
    # nll,g = a.cal_nll_gradient()#data_w,mcdata,weight=weights,batch=50000)
    # print("nll:",nll,"Time:",time.time()-t)
    # exit()
    fcn = FCN(a)

    # fit configure
    args = {}
    args_name = []
    x0 = []
    bnds = []
    bounds_dict = {"Zc_4160_m0:0": (4.1, 4.22), "Zc_4160_g0:0": (0, None)}

    for i in a.Amp.trainable_variables:
        args[i.name] = i.numpy()
        x0.append(i.numpy())
        args_name.append(i.name)
        if i.name in bounds_dict:
            bnds.append(bounds_dict[i.name])
        else:
            bnds.append((None, None))
        args["error_" + i.name] = 0.1

    bnds_cube = []
    # use distinct names so the loop does not shadow the Cache_Model instance `a`
    for lo, hi in bnds:
        if lo is None:
            if hi is None:
                bnds_cube.append((200, -100))
            else:
                bnds_cube.append((hi + 100, -100))
        else:
            if hi is None:
                bnds_cube.append((100 - lo, lo))
            else:
                bnds_cube.append((hi - lo, lo))
    # bnds_cube=[(0,1),(0,1)]
    cube_size = np.array(bnds_cube)

    def Prior(cube):
        cube[:] = cube * cube_size[:, 0] + cube_size[:, 1]
        return cube

    def LogLikelihood(cube):
        # return -np.sum(cube*cube)
        ret = fcn(cube)
        return -ret

    prefix = "chains/1-"
    with open("%sparams.json" % prefix, "w") as f:
        json.dump(args_name, f, indent=2)
    n_params = len(args_name)

    # we want to see some output while it is running
    # progress = pymultinest.ProgressPlotter(n_params = n_params, outputfiles_basename=prefix);
    # progress.start()
    # threading.Timer(30, show, [prefix+"phys_live.points.pdf"]).start() # delayed opening
    now = time.time()
    solution = solve(
        LogLikelihood,
        Prior,
        n_dims=n_params,
        outputfiles_basename=prefix,
        importance_nested_sampling=False,
        verbose=True,
    )
    print("Time for fitting:", time.time() - now)
    # progress.stop()
    # print(solution)
    print()
    print("evidence: %(logZ).1f +- %(logZerr).1f" % solution)
    print()
    print("parameter values:")
    for name, col in zip(args_name, solution["samples"].transpose()):
        print("%15s : %.3f +- %.3f" % (name, col.mean(), col.std()))
Example #30
def main(args):
    ndim = args.x_dim
    sigma = args.sigma
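    # note: the scalar sigma above is immediately replaced by per-dimension widths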
    sigma = np.logspace(-1, np.log10(args.sigma), ndim)
    width = 1 - 5 * sigma
    width[width < 1e-20] = 1e-20
    centers = (np.sin(np.arange(ndim) / 2.) * width + 1.) / 2.
    #centers = np.ones(ndim) * 0.5

    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    def loglike(theta):
        like = -0.5 * (((theta - centers) / sigma)**2).sum(
            axis=1) - 0.5 * np.log(2 * np.pi * sigma**2).sum()
        return like

    def transform(x):
        return x

    def transform_loglike_gradient(u):
        theta = u
        like = -0.5 * (((theta - centers) / sigma)**2).sum(
            axis=1) - 0.5 * np.log(2 * np.pi * sigma**2).sum()
        # analytic gradient of the Gaussian log-likelihood above
        grad = -(theta - centers) / sigma**2
        return u, like, grad

    def gradient(theta):
        return -(theta - centers) / sigma**2  # gradient of the log-likelihood

    paramnames = ['param%d' % (i + 1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike,
                       Prior=transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim,
                                                          args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim,
                                                         args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim,
                                                          args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim,
                                                           args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=log_dir,
                                        resume=True,
                                        vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.dyhmc:
            import ultranest.dyhmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim,
                            transform,
                            loglike,
                            transform_loglike_gradient,
                            combination=True)
            sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(
                ndim=ndim,
                nsteps=args.slice_steps,
                transform_loglike_gradient=transform_loglike_gradient,
                adaptive_nsteps=adaptive_nsteps)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(
                ndim=ndim,
                nsteps=args.slice_steps,
                transform=transform,
                loglike=loglike,
                gradient=gradient,
                adaptive_nsteps=adaptive_nsteps)
        sampler.run(frac_remain=0.5,
                    min_num_live_points=args.num_live_points,
                    max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename=log_dir +
                                     '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim,
                                resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
Example #31
def main(args):
    ndim = args.x_dim
    sigmas = 10**(-2.0 +
                  2.0 * np.cos(np.arange(ndim) - 2)) / (np.arange(ndim) - 2)
    sigmas[:2] = 1.0
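    # note: (np.arange(ndim) - 2) is zero at index 2, so sigmas[2] is infinite
    # as written; only the first two entries are reset to 1.0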

    def transform(x):
        y = x  #.copy()
        #y[:,1::3] = 10**-y[:,1::3]
        #y[:,::3] *= x[:,2::3]
        return y

    centers = transform(np.ones((1, ndim)) * 0.2).flatten()
    degsigmas = 0.01
    crosssigmas = args.sigma

    # * sigmas[3:-1:] * sigmas[4::]

    def loglike(theta):
        # independent gaussian terms for all but the first dimension
        like = -0.5 * (np.abs(
            (theta[:, 1:] - centers[1:]) / sigmas[1:])**2).sum(axis=1)
        # non-linear degeneracy between the first two dimensions
        like2 = -0.5 * (np.abs(
            (theta[:, 1] * theta[:, 0] - centers[1] * centers[0]) / degsigmas)**2)
        # pair-wise correlations between neighbouring dimensions
        a = (theta[:, 3:-1] - centers[3:-1]) / sigmas[3:-1]
        b = (theta[:, 4:] - centers[4:]) / sigmas[4:]
        like3 = -0.5 * (np.abs((a - b) / crosssigmas)**2).sum(axis=1)
        return like + like2 + like3

    print(centers, crosssigmas, sigmas)
    import string
    paramnames = list(string.ascii_lowercase)[:ndim]

    if args.pymultinest:
        from pymultinest.solve import solve
        import json

        def flat_loglike(theta):
            return loglike(theta.reshape((1, -1))).flatten()

        def flat_transform(cube):
            return transform(cube.reshape((1, -1))).flatten()

        result = solve(LogLikelihood=flat_loglike,
                       Prior=flat_transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       n_live_points=args.num_live_points,
                       importance_nested_sampling=False)
        json.dump(paramnames,
                  open(args.log_dir + 'MN-%ddparams.json' % ndim, 'w'))
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=args.log_dir +
                                        'RNS-%dd' % ndim,
                                        vectorized=True)
        sampler.run(frac_remain=0.5,
                    min_ess=400,
                    min_num_live_points=args.num_live_points)
        sampler.print_results()
        sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim)
        sampler.run()
        sampler.print_results()
        sampler.plot()
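A note on the `vectorized=True` contract these examples rely on: `transform` receives and returns arrays of shape `(batch, ndim)`, and `loglike` maps such a batch to a `(batch,)` vector of log-likelihoods. A minimal illustrative check, assuming it were placed inside `main` after the definitions above (my sketch, not original code):

# illustrative check of the vectorized call contract (sketch, not original code)
batch = np.random.uniform(size=(16, ndim))   # 16 points in the unit hypercube
params = transform(batch)                    # unit cube -> parameter space
assert params.shape == (16, ndim)
logl = loglike(params)                       # one log-likelihood per row
assert logl.shape == (16,)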
Example No. 32
def main(args):

    np.random.seed(2)
    Ndata = args.ndata
    jitter_true = 0.1
    phase_true = 0.
    period_true = 180
    amplitude_true = args.contrast / Ndata * jitter_true
    paramnames = ['amplitude', 'jitter', 'phase', 'period']
    ndim = 4
    derivednames = []  # e.g. ['frequency'], if the transform returned an extra column
    wrapped_params = [False, False, True, False]  # the phase parameter is periodic
    
    x = np.linspace(0, 360, 1000)
    y = amplitude_true * sin(x / period_true * 2 * pi + phase_true)
    
    # plot the true curve, then replace x, y with simulated noisy observations
    if True:
        plt.plot(x, y)
        x = np.random.uniform(0, 360, Ndata)
        y = np.random.normal(amplitude_true * sin(x / period_true * 2 * pi + phase_true), jitter_true)
        plt.errorbar(x, y, yerr=jitter_true, marker='x', ls=' ')
        plt.savefig('testsine.pdf', bbox_inches='tight')
        plt.close()
    
    
    def loglike(params):
        # vectorized Gaussian likelihood with unknown jitter; params has shape (batch, 4)
        amplitude, jitter, phase, period = params.transpose()[:4]
        predicty = amplitude * sin(x.reshape((-1,1)) / period * 2 * pi + phase)
        logl = (-0.5 * log(2 * pi * jitter**2) - 0.5 * ((predicty - y.reshape((-1,1))) / jitter)**2).sum(axis=0)
        assert logl.shape == jitter.shape
        return logl
    
    def transform(x):
        z = np.empty((len(x), 4))
        z[:,0] = 10**(x[:,0] * 4 - 2)     # amplitude: log-uniform in [1e-2, 1e2]
        z[:,1] = 10**(x[:,1] * 1 - 1.5)   # jitter: log-uniform in [10**-1.5, 10**-0.5]
        z[:,2] = 2 * pi * x[:,2]          # phase: uniform in [0, 2*pi]
        z[:,3] = 10**(x[:,3] * 4 - 1)     # period: log-uniform in [1e-1, 1e3]
        #z[:,4] = 2 * pi / x[:,3]         # (disabled derived parameter: frequency)
        return z

    # quick smoke test of the vectorized likelihood at the centre of the unit cube
    loglike(transform(np.ones((2, ndim)) * 0.5))
    if args.pymultinest:
        from pymultinest.solve import solve
        global Lmax
        Lmax = -np.inf

        def flat_loglike(theta):
            # adapt the vectorized likelihood to pymultinest's scalar interface,
            # printing whenever a new best likelihood is found
            L = loglike(theta.reshape((1, -1)))[0]
            global Lmax
            if L > Lmax:
                print("Like: %.2f" % L)
                Lmax = L
            return L
        
        def flat_transform(cube):
            return transform(cube.reshape((1, -1)))[0]
        
        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform, 
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            n_live_points=args.num_live_points,
            verbose=True, resume=False, importance_nested_sampling=False)
        
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
        return
    
    elif args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform, 
            log_dir=args.log_dir, vectorized=True,
            derived_param_names=derivednames, wrapped_params=wrapped_params,
            resume='overwrite')
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform, 
            log_dir=args.log_dir, vectorized=True,
            derived_param_names=derivednames, wrapped_params=wrapped_params,
            resume='overwrite')
    
    sampler.run(min_num_live_points=args.num_live_points)
        
    print()
    sampler.plot()
    
    for i, p in enumerate(paramnames + derivednames):
        v = sampler.results['samples'][:,i]
        print('%20s: %5.3f +- %5.3f' % (p, v.mean(), v.std()))
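The disabled `'frequency'` entries above point at ultranest's derived-parameter mechanism: `transform` may return extra trailing columns, named via `derived_param_names`, which are recorded alongside the posterior without being sampled. A hedged sketch of re-enabling it (`transform_with_derived` is my name; note the original commented-out line divided by the unit-cube coordinate `x[:,3]`, where the physical period `z[:,3]` appears intended):

# sketch: a transform returning one derived column (frequency) after the
# four sampled parameters; pair it with derived_param_names=['frequency']
def transform_with_derived(x):
    z = np.empty((len(x), 5))
    z[:,0] = 10**(x[:,0] * 4 - 2)     # amplitude
    z[:,1] = 10**(x[:,1] * 1 - 1.5)   # jitter
    z[:,2] = 2 * pi * x[:,2]          # phase
    z[:,3] = 10**(x[:,3] * 4 - 1)     # period
    z[:,4] = 2 * pi / z[:,3]          # frequency, derived from the period
    return z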
Example No. 33

from numpy import pi, cos
from pymultinest.solve import solve
import pymultinest
import os
if not os.path.exists("chains"): os.mkdir("chains")

# probability function, taken from the eggbox problem.

def myprior(cube):
    return cube * 10 * pi

def myloglike(cube):
    chi = (cos(cube / 2.)).prod()
    return (2. + chi)**5

# number of dimensions our problem has
parameters = ["x", "y"]
n_params = len(parameters)

# run MultiNest
result = solve(LogLikelihood=myloglike, Prior=myprior,
               n_dims=n_params, outputfiles_basename="chains/3-")

print()
print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
for name, col in zip(parameters, result['samples'].transpose()):
    print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

# a second variant of the eggbox likelihood (cosine shifted by +1)
def myloglike(cube):
    chi = (cos(cube / 2.) + 1).prod()
    return (2. + chi)**5

# number of dimensions our problem has
parameters = ["x", "y"]
n_params = len(parameters)
# name of the output files
prefix = "chains_2/3-"
if not os.path.exists("chains_2"): os.mkdir("chains_2")

# run MultiNest
result = solve(LogLikelihood=myloglike,
               Prior=myprior,
               n_dims=n_params,
               outputfiles_basename=prefix,
               verbose=True)

print()
#print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
for name, col in zip(parameters, result['samples'].transpose()):
    print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

# let's analyse the results
a = pymultinest.Analyzer(n_params=n_params, outputfiles_basename=prefix)
s = a.get_stats()

# make marginal plots by running:
# $ python multinest_marginals.py chains_2/3-
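`get_stats()` returns a dictionary of summary statistics. The sketch below reads the global evidence and per-parameter marginals from it, using the key names that appear in pymultinest's own demo scripts (an illustrative addition, not part of the original example):

# sketch: reading MultiNest summary statistics (key names as in pymultinest's demos)
print('global log-evidence: %.1f +- %.1f' %
      (s['global evidence'], s['global evidence error']))
for p, m in zip(parameters, s['marginals']):
    lo, hi = m['1sigma']              # 1-sigma credible interval
    print('%15s : %.3f  (%.3f .. %.3f)' % (p, m['median'], lo, hi))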