Esempio n. 1
0
def test_reactive_run():
    """Run the reactive sampler on a bimodal 2d Gaussian and verify call accounting.

    Checks that the number of likelihood evaluations reported by the sampler
    matches the count kept by the likelihood itself (aggregated over MPI
    ranks), and that every evaluated point is unique.
    """
    from ultranest import ReactiveNestedSampler
    np.random.seed(1)
    evals = set()

    def loglike(z):
        #print(loglike.ncalls, z[0,0])
        # record the first coordinate of every evaluated point to detect duplicates
        # (plain loop: a list comprehension for side effects is unidiomatic)
        for x in z:
            evals.add(str(x[0]))
        a = np.array([
            -0.5 * sum(((xi - 0.83456 + i * 0.1) / 0.5)**2
                       for i, xi in enumerate(x)) for x in z
        ])
        b = np.array([
            -0.5 * sum(((xi - 0.43456 - i * 0.1) / 0.5)**2
                       for i, xi in enumerate(x)) for x in z
        ])
        loglike.ncalls += len(a)
        # mixture of the two Gaussian modes
        return np.logaddexp(a, b)

    loglike.ncalls = 0

    def transform(x):
        # map unit cube to [-5, 5]
        return 10. * x - 5.

    paramnames = ['Hinz', 'Kunz']

    sampler = ReactiveNestedSampler(paramnames,
                                    loglike,
                                    transform=transform,
                                    draw_multiple=False,
                                    vectorized=True)
    r = sampler.run(log_interval=50, min_num_live_points=400)
    ncalls = loglike.ncalls
    nunique = len(evals)
    if sampler.mpi_size > 1:
        # aggregate call counts and evaluated points across all MPI ranks
        ncalls = sampler.comm.gather(ncalls, root=0)
        if sampler.mpi_rank == 0:
            print("ncalls on the different MPI ranks:", ncalls)
        ncalls = sum(sampler.comm.bcast(ncalls, root=0))

        allevals = sampler.comm.gather(evals, root=0)
        if sampler.mpi_rank == 0:
            print("evals on the different MPI ranks:",
                  [len(e) for e in allevals])
            allevals = len(set.union(*allevals))
        else:
            allevals = None
        nunique = sampler.comm.bcast(allevals, root=0)

    if sampler.mpi_rank == 0:
        print('ncalls:', ncalls, 'nunique:', nunique)

    assert abs(r['ncall'] - ncalls) <= 2 * sampler.mpi_size, (r['ncall'],
                                                              ncalls)
    assert ncalls == nunique, (ncalls, nunique)
    if sampler.mpi_rank == 0:
        # context manager so the results file handle is closed deterministically
        # (original used open(...).write(...) and leaked the handle)
        with open('nestedsampling_reactive_results.txt', 'a') as f:
            f.write("%.3f\n" % r['logz'])
    sampler.plot()
Esempio n. 2
0
def test_reactive_run_resume_eggbox():
    """Run the eggbox problem twice with resume, checking call accounting.

    The second run resumes from the point store of the first; the sampler's
    reported call count must match the fresh likelihood calls plus the calls
    already stored, within an MPI-size tolerance.
    """
    from ultranest import ReactiveNestedSampler

    def loglike(z):
        # eggbox-style multimodal surface; vectorized over rows of z
        chi = (np.cos(z / 2.)).prod(axis=1)
        loglike.ncalls += len(z)
        return (2. + chi)**5

    loglike.ncalls = 0

    def transform(x):
        # map unit cube to [0, 10*pi]
        return x * 10 * np.pi

    paramnames = ['a', 'b']

    #last_results = None
    folder = tempfile.mkdtemp()
    np.random.seed(1)
    try:
        for i in range(2):
            print()
            print("====== Running Eggbox problem [%d] =====" % (i + 1))
            print()
            sampler = ReactiveNestedSampler(paramnames,
                                            loglike,
                                            transform=transform,
                                            log_dir=folder,
                                            resume=True,
                                            vectorized=True)
            # likelihood calls already stored from the previous (resumed) run
            initial_ncalls = int(sampler.ncall)
            loglike.ncalls = 0
            r = sampler.run(max_iters=200 + i * 200,
                            max_num_improvement_loops=1,
                            min_num_live_points=100,
                            cluster_num_live_points=0)
            sampler.print_results()
            sampler.pointstore.close()
            print(loglike.ncalls, r['ncall'], initial_ncalls)

            ncalls = loglike.ncalls
            if sampler.mpi_size > 1:
                # sum the fresh likelihood calls over all MPI ranks
                ncalls = sampler.comm.gather(ncalls, root=0)
                if sampler.mpi_rank == 0:
                    print("ncalls on the different MPI ranks:", ncalls)
                ncalls = sum(sampler.comm.bcast(ncalls, root=0))
            ncalls = ncalls + initial_ncalls
            assert abs(r['ncall'] - ncalls) <= 2 * sampler.mpi_size, (
                i, r['ncall'], ncalls, r['ncall'] - ncalls)
            #last_results = r
    finally:
        shutil.rmtree(folder, ignore_errors=True)
Esempio n. 3
0
def test_run_resume(dlogz):
    """Run twice with resume on a narrow 1d Gaussian; evidences must agree.

    The second, resumed run should reproduce the evidence of the first
    (within atol=0.5) without needing to add points to the point store.
    """
    from ultranest import ReactiveNestedSampler
    sigma = 0.01
    ndim = 1

    def loglike(theta):
        # vectorized log-density of a Gaussian centered on 0.5
        like = -0.5 * (((theta - 0.5) / sigma)**2).sum(axis=1) - 0.5 * np.log(
            2 * np.pi * sigma**2) * ndim
        return like

    def transform(x):
        return x

    paramnames = ['a']

    def myadd(row):
        # tripwire: a resumed run must not store any new points
        assert False, (row, 'should not need to add more points in resume')

    last_results = None
    #for dlogz in 0.5, 0.1, 0.01:
    np.random.seed(int(dlogz * 100))
    folder = tempfile.mkdtemp()
    try:
        for i in range(2):
            sampler = ReactiveNestedSampler(paramnames,
                                            loglike,
                                            transform=transform,
                                            log_dir=folder,
                                            resume=True,
                                            vectorized=True)
            r = sampler.run(log_interval=50,
                            dlogz=dlogz,
                            min_num_live_points=400)
            sampler.print_results()
            sampler.pointstore.close()
            if i == 1:
                # NOTE(review): this installs the tripwire after run() has
                # completed and the store is closed, so it can never fire —
                # presumably intended before run() / on the resumed pass; verify.
                sampler.pointstore.add = myadd
            del r['weighted_samples']
            del r['samples']
            if last_results is not None:
                print("ran with dlogz:", dlogz)
                print("first run gave:", last_results)
                print("second run gave:", r)
                assert last_results['logzerr'] < 1.0
                assert r['logzerr'] < 1.0
                assert np.isclose(last_results['logz'], r['logz'], atol=0.5)
            last_results = r
    finally:
        shutil.rmtree(folder, ignore_errors=True)
Esempio n. 4
0
def test_return_summary():
    """Validate the structure and values of the run() result dictionary.

    Uses a 2d Gaussian with known centers/widths so that the posterior
    summaries (mean, median, stdev, error bars) can be bracketed tightly.
    """
    from ultranest import ReactiveNestedSampler
    sigma = np.array([0.1, 0.01])
    centers = np.array([0.5, 0.75])
    paramnames = ['a', 'b']
    ndim = len(paramnames)

    def loglike(theta):
        # non-vectorized Gaussian log-density (one parameter vector per call)
        like = -0.5 * (((theta - centers) / sigma)**2) - 0.5 * np.log(
            2 * np.pi * sigma**2) * ndim
        return like.sum()

    def transform(x):
        return x

    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform)
    r = sampler.run()

    print(r)
    # run metadata
    assert r['paramnames'] == paramnames
    assert r['niter'] > 100
    assert -10 < r['logz'] < 10
    assert 0.01 < r['logzerr'] < 0.5
    assert 1 < r['ess'] < 10000
    # posterior summaries must bracket the true centers and widths
    assert 0.4 < r['posterior']['mean'][0] < 0.6
    assert 0.74 < r['posterior']['mean'][1] < 0.76
    assert 0.4 < r['posterior']['median'][0] < 0.6
    assert 0.74 < r['posterior']['median'][1] < 0.76
    assert 0.05 < r['posterior']['stdev'][0] < 0.2
    assert 0.005 < r['posterior']['stdev'][1] < 0.02

    assert 0.35 < r['posterior']['errlo'][0] < 0.45
    assert 0.72 < r['posterior']['errlo'][1] < 0.75
    assert 0.55 < r['posterior']['errup'][0] < 0.65
    assert 0.75 < r['posterior']['errup'][1] < 0.78

    # samples arrays must have consistent shapes
    N, ndim2 = r['samples'].shape
    assert ndim2 == ndim
    assert N > 10
    N, ndim2 = r['weighted_samples']['points'].shape
    assert ndim2 == ndim
    assert N > 10

    assert r['weighted_samples']['logw'].shape == (N, )
    assert r['weighted_samples']['weights'].shape == (N, )
    assert r['weighted_samples']['bootstrapped_weights'].shape[0] == N
    assert r['weighted_samples']['logl'].shape == (N, )
Esempio n. 5
0
def test_reactive_run_extraparams():
    """Run with a derived parameter appended by the prior transform."""
    from ultranest import ReactiveNestedSampler
    np.random.seed(1)

    def loglike(z):
        # the likelihood only looks at the derived (last) entry
        return -0.5 * z[-1].sum()
    loglike.ncalls = 0

    def transform(x):
        # map to [-5, 5], then append the L1 distance from the origin
        physical = 10. * x - 5.
        return np.append(physical, np.abs(physical).sum())

    paramnames = ['Hinz', 'Kunz']

    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
        derived_param_names=['ctr_distance'])
    r = sampler.run()
    # two fitted parameters + one derived parameter
    assert r['samples'].shape[1] == 3
    sampler.plot()
Esempio n. 6
0
def test_dlogz_reactive_run_SLOW():
    """Check that logzerr responds to the termination criterion used.

    A first run targeting only effective sample size leaves a large evidence
    uncertainty; continuing with dlogz=0.1 must shrink logzerr below 0.2.
    """
    from ultranest import ReactiveNestedSampler
    import ultranest.mlfriends

    def loglike(y):
        # very narrow Gaussian centered on 0.5
        return -0.5 * np.sum(((y - 0.5)/0.001)**2, axis=1)

    paramnames = ['Hinz', 'Kunz']

    sampler = ReactiveNestedSampler(paramnames, loglike, vectorized=True)
    print("running for ess")
    firstresults = sampler.run(min_num_live_points=50, cluster_num_live_points=0, max_num_improvement_loops=3, min_ess=10000, viz_callback=None)
    print()
    print({k:v for k, v in firstresults.items() if 'logzerr' in k})
    print()
    # ess-only run should not reach the dlogz accuracy yet
    assert firstresults['logzerr'] > 0.1 * 2
    print("running again for logz")
    for niter, results in enumerate(sampler.run_iter(min_num_live_points=1, cluster_num_live_points=0, max_num_improvement_loops=10, dlogz=0.1, viz_callback=None, region_class=ultranest.mlfriends.RobustEllipsoidRegion)):
        print("logzerr in iteration %d" % niter, results['logzerr'])
    print()
    print({k:v for k, v in results.items() if 'logzerr' in k})
    assert results['logzerr'] < 0.1 * 2
Esempio n. 7
0
def test_stepsampler_regionmh(plot=False):
    """Nested sampling with a region-aware Metropolis-Hastings step sampler."""
    np.random.seed(2)
    sampler = ReactiveNestedSampler(paramnames, loglike_vectorized, transform=transform, vectorized=True)
    sampler.stepsampler = RegionMHSampler(nsteps=4 * len(paramnames))
    result = sampler.run(log_interval=50, min_num_live_points=400)
    sampler.print_results()
    near_high = (np.abs(result['samples'] - 0.7) < 0.1).all(axis=1)
    near_low = (np.abs(result['samples'] - 0.3) < 0.1).all(axis=1)
    # both modes must be represented in the posterior samples
    assert near_high.sum() > 1, near_high
    assert near_low.sum() > 1, near_low
Esempio n. 8
0
def test_stepsampler_regionslice(plot=False):
    """Nested sampling with a region-oriented slice step sampler."""
    np.random.seed(4)
    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform)
    sampler.stepsampler = RegionSliceSampler(nsteps=len(paramnames))
    result = sampler.run(log_interval=50, min_num_live_points=400)
    sampler.print_results()
    near_high = (np.abs(result['samples'] - 0.7) < 0.1).all(axis=1)
    near_low = (np.abs(result['samples'] - 0.3) < 0.1).all(axis=1)
    # both modes must be represented in the posterior samples
    assert near_high.sum() > 1
    assert near_low.sum() > 1
Esempio n. 9
0
def test_stepsampler_variable_speed_SLOW(plot=False):
    """Exercise SpeedVariableRegionSliceSampler with two matrix specifications."""
    matrices = [
        # boolean update matrix
        np.array([[True, True, True], [False, True, True], [False, False, True]]),
        # equivalent slice-based specification
        [Ellipsis, slice(1,None), slice(2,4)]
    ]
    for idx, matrix in enumerate(matrices):
        np.random.seed(42 + idx)
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform)
        sampler.stepsampler = SpeedVariableRegionSliceSampler(matrix)
        result = sampler.run(log_interval=50, min_num_live_points=400)
        sampler.print_results()
        near_high = (np.abs(result['samples'] - 0.7) < 0.1).all(axis=1)
        near_low = (np.abs(result['samples'] - 0.3) < 0.1).all(axis=1)
        # both modes must be represented in the posterior samples
        assert near_high.sum() > 1
        assert near_low.sum() > 1
Esempio n. 10
0
def test_stepsampler_cubeslice(plot=False):
    """Population slice sampler with cube-oriented directions and random settings."""
    np.random.seed(3)
    # draw order matters for seed reproducibility: nsteps first, then popsize
    nsteps = np.random.randint(10, 50)
    popsize = np.random.randint(1, 20)
    sampler = ReactiveNestedSampler(paramnames,
                                    loglike_vectorized,
                                    transform=transform,
                                    vectorized=True)

    sampler.stepsampler = PopulationSliceSampler(
        popsize=popsize,
        nsteps=nsteps,
        generate_direction=generate_cube_oriented_direction,
    )
    result = sampler.run(viz_callback=None, log_interval=50)
    sampler.print_results()
    near_high = (np.abs(result['samples'] - 0.7) < 0.1).all(axis=1)
    near_low = (np.abs(result['samples'] - 0.3) < 0.1).all(axis=1)
    # both modes must be represented in the posterior samples
    assert near_high.sum() > 1
    assert near_low.sum() > 1
Esempio n. 11
0
def main(args):
    """Fit mock Gaussian data where the first parameter is log10 of the scatter."""
    np.random.seed(2)
    ndim = args.x_dim
    sigma = args.sigma
    centers = np.sin(np.arange(ndim) / 2.)
    # one mock observation of each center with Gaussian noise
    data = np.random.normal(centers, sigma).reshape((1, -1))

    def loglike(theta):
        # first column is log10 of the scatter; remaining columns are the means
        sigma = 10**theta[:,0]
        # NOTE(review): `log` is presumably numpy's log imported at module level — verify
        like = -0.5 * (((theta[:,1:] - data)/sigma.reshape((-1, 1)))**2).sum(axis=1) - 0.5 * log(2 * np.pi * sigma**2) * ndim
        return like

    def transform(x):
        # means in [-10, 10]; log10-scatter in [-3, 3]
        z = x * 20 - 10
        z[:,0] = x[:,0] * 6 - 3
        return z

    import string
    paramnames = ['sigma'] + list(string.ascii_lowercase)[:ndim]

    if args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform, 
            log_dir=args.log_dir + 'RNS-%dd' % ndim, vectorized=True,
            resume=True, viz_callback = False, show_status = False)
        sampler.run(log_interval=20, min_num_live_points=args.num_live_points)
        sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform, 
            num_live_points=args.num_live_points, vectorized=True,
            log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.plot()
Esempio n. 12
0
def main(args):
    """Benchmark a multimodal loggamma/normal mixture problem.

    Dispatches to PyMultiNest, the reactive sampler (optionally with slice or
    harmonic step samplers), or the plain nested sampler, based on args.
    """
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    # first two dimensions: bimodal mixtures of loggamma/normal pairs
    rv1a = scipy.stats.loggamma(1, loc=2. / 3, scale=1. / 30)
    rv1b = scipy.stats.loggamma(1, loc=1. / 3, scale=1. / 30)
    rv2a = scipy.stats.norm(2. / 3, 1. / 30)
    rv2b = scipy.stats.norm(1. / 3, 1. / 30)
    # remaining dimensions: unimodal, loggamma for the first half, normal after
    rv_rest = []
    for i in range(2, ndim):
        if i <= (ndim + 2) / 2:
            rv = scipy.stats.loggamma(1, loc=2. / 3., scale=1. / 30)
        else:
            rv = scipy.stats.norm(2. / 3, 1. / 30)
        rv_rest.append(rv)
        del rv

    def loglike(theta):
        # bimodal terms for the first two coordinates (1e-300 avoids log(0))
        L1 = log(0.5 * rv1a.pdf(theta[:, 0]) + 0.5 * rv1b.pdf(theta[:, 0]) +
                 1e-300)
        L2 = log(0.5 * rv2a.pdf(theta[:, 1]) + 0.5 * rv2b.pdf(theta[:, 1]) +
                 1e-300)
        Lrest = np.sum(
            [rv.logpdf(t) for rv, t in zip(rv_rest, theta[:, 2:].transpose())],
            axis=0)
        #assert L1.shape == (len(theta),)
        #assert L2.shape == (len(theta),)
        #assert Lrest.shape == (len(theta),), Lrest.shape
        like = L1 + L2 + Lrest
        # replace numerically dead values with a smooth, very low surface
        like = np.where(like < -1e300,
                        -1e300 - ((np.asarray(theta) - 0.5)**2).sum(), like)
        assert like.shape == (len(theta), ), (like.shape, theta.shape)
        return like

    def transform(x):
        # priors are defined directly on the unit cube
        return x

    paramnames = ['param%d' % (i + 1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            # adapt the vectorized likelihood to one vector per call
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike,
                       Prior=transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        # encode the step-sampler choice in the output directory name
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim,
                                                          args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim,
                                                         args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim,
                                                          args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim,
                                                           args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=log_dir,
                                        resume=True,
                                        vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            # only rank 0 writes the step sampler log
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w')
                if sampler.mpi_rank == 0 else False)
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w')
                if sampler.mpi_rank == 0 else False)
        #if args.dyhmc:
        #    import ultranest.dyhmc
        #    verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
        #    sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps,
        #        transform_loglike_gradient=transform_loglike_gradient, adaptive_nsteps=adaptive_nsteps)
        #if args.dychmc:
        #    import ultranest.dychmc
        #    verify_gradient(ndim, transform, loglike, gradient)
        #    sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps,
        #        transform=transform, loglike=loglike, gradient=gradient, adaptive_nsteps=adaptive_nsteps)
        sampler.run(frac_remain=0.5,
                    min_num_live_points=args.num_live_points,
                    max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename=log_dir +
                                     '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim,
                                resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
Esempio n. 13
0
# this version uses one parameter vector per function call
# because function calls are expensive, the runcpp.py way is more efficient and recommended

# load the compiled C++ shared library providing transform and likelihood
mycpplib = ctypes.CDLL("mycpplib.so")

# define the arguments of the functions and return values
# my_cpp_transform modifies the passed float64 array in place
mycpplib.my_cpp_transform.argtypes = [
    np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS'),
    ctypes.c_size_t]

mycpplib.my_cpp_likelihood.argtypes = [
    np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS'),
    ctypes.c_size_t]
mycpplib.my_cpp_likelihood.restype = ctypes.c_double


def mytransformwrapper(cube):
	"""Copy the unit-cube vector and transform it in place via the C++ library."""
	params = cube.copy()
	mycpplib.my_cpp_transform(params, params.size)
	return params

def mylikelihoodwrapper(params):
	"""Evaluate the C++ log-likelihood for a single parameter vector."""
	return mycpplib.my_cpp_likelihood(params, params.size)

paramnames = ["a", "b", "c"]
sampler = ReactiveNestedSampler(paramnames, mylikelihoodwrapper, transform=mytransformwrapper)
sampler.run()
sampler.print_results()
sampler.plot()
Esempio n. 14
0
import numpy as np
from ultranest import ReactiveNestedSampler


def mytransform(cube):
    """Map the unit hypercube onto the box [-1, 1] in every dimension."""
    return 2 * cube - 1


def mylikelihood(params):
    """Vectorized log-likelihood of independent Gaussians of width 0.01.

    The Gaussian center of dimension d is 0.1 * d; params has shape
    (npoints, ndim) and one log-likelihood per row is returned.
    """
    ndim = params.shape[1]
    centers = (0.1 * np.arange(ndim)).reshape((1, -1))
    deviations = (params - centers) / 0.01
    return -0.5 * np.einsum('ij,ij->i', deviations, deviations)


# sample the 3d Gaussian problem with the vectorized likelihood above
paramnames = ["a", "b", "c"]
sampler = ReactiveNestedSampler(paramnames,
                                mylikelihood,
                                transform=mytransform,
                                vectorized=True)
sampler.run()
sampler.print_results()
sampler.plot()
Esempio n. 15
0
def main(args):
    """Benchmark a Gaussian with a non-linear degeneracy and pair-wise correlations.

    Dispatches to PyMultiNest, the reactive sampler, or the plain nested
    sampler, based on args.
    """
    ndim = args.x_dim
    # per-dimension widths; the first two are fixed to 1
    sigmas = 10**(-2.0 +
                  2.0 * np.cos(np.arange(ndim) - 2)) / (np.arange(ndim) - 2)
    sigmas[:2] = 1.0

    def transform(x):
        # NOTE(review): returns the input array itself (copy is commented out);
        # safe only while the mutating lines below stay disabled — verify.
        y = x  #.copy()
        #y[:,1::3] = 10**-y[:,1::3]
        #y[:,::3] *= x[:,2::3]
        return y

    centers = transform(np.ones((1, ndim)) * 0.2).flatten()
    degsigmas = 0.01
    crosssigmas = args.sigma

    # * sigmas[3:-1:] * sigmas[4::]

    def loglike(theta):
        # gaussian
        like = -0.5 * (np.abs(
            (theta[:, 1:] - centers[1:]) / sigmas[1:])**2).sum(axis=1)
        # non-linear degeneracy correlation
        like2 = -0.5 * (np.abs(
            (theta[:, 1] * theta[:, 0] - centers[1] * centers[0]) / degsigmas)
                        **2)  #.sum(axis=1)
        # pair-wise correlation
        a = (theta[:, 3:-1:] - centers[3:-1:]) / sigmas[3:-1:]
        b = (theta[:, 4::] - centers[4::]) / sigmas[4::]
        like3 = -0.5 * (np.abs((a - b) / crosssigmas)**2).sum(axis=1)
        return like + like2 + like3

    print(centers, crosssigmas, sigmas)
    import string
    paramnames = list(string.ascii_lowercase)[:ndim]

    if args.pymultinest:
        from pymultinest.solve import solve
        import json

        def flat_loglike(theta):
            # adapt vectorized likelihood to one vector per call
            return loglike(theta.reshape((1, -1))).flatten()

        def flat_transform(cube):
            return transform(cube.reshape((1, -1))).flatten()

        result = solve(LogLikelihood=flat_loglike,
                       Prior=flat_transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       n_live_points=args.num_live_points,
                       importance_nested_sampling=False)
        json.dump(paramnames,
                  open(args.log_dir + 'MN-%ddparams.json' % ndim, 'w'))
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=args.log_dir +
                                        'RNS-%dd' % ndim,
                                        vectorized=True)
        sampler.run(frac_remain=0.5,
                    min_ess=400,
                    min_num_live_points=args.num_live_points)
        sampler.print_results()
        sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim)
        sampler.run()
        sampler.print_results()
        sampler.plot()
Esempio n. 16
0
def test_overclustering_eggbox_update():
    """Regression test for over-clustering on recorded eggbox live point sets.

    For several stored test cases, rebuilds the MLFriends region and checks
    that cluster detection neither explodes in cluster count nor leaves
    single-point ("lonely") clusters after a live point update.
    """
    from ultranest import ReactiveNestedSampler
    np.random.seed(1)
    for i in [20, 23, 24, 27, 42]:
        print()
        print("==== TEST CASE %d =====================" % i)
        print()
        mock = MockIntegrator()
        print("loading...")
        data = np.load(os.path.join(here, "overclustered_%d.npz" % i))
        print("loading... done")

        nsamples, mock.x_dim = data['u0'].shape
        noverlap = 0
        # count points common to the old (u0) and new (u) live point sets;
        # iterate without enumerate so the outer test-case index `i` is not
        # clobbered (it is used in messages and asserts further down)
        for u1 in data['u']:
            assert len((u1 == data['u0']).all(axis=1)) == nsamples
            noverlap += (u1 == data['u0']).all(axis=1).sum()
        print("u0:%d -> u:%d : %d points are common" %
              (nsamples, nsamples, noverlap))

        ReactiveNestedSampler._update_region(mock, data['u0'], data['u0'])
        nclusters = mock.transformLayer.nclusters
        print("initialised with: r=%e nc=%d" %
              (mock.region.maxradiussq, nclusters))
        smallest_cluster = min(
            (mock.transformLayer.clusterids == cid).sum()
            for cid in np.unique(mock.transformLayer.clusterids))
        if smallest_cluster == 1:
            print("found lonely points")

        print(" --- intermediate tests how create_new reacts ---")
        # same data, same radius: clustering must stay stable
        nextTransformLayer = mock.transformLayer.create_new(
            data['u0'], mock.region.maxradiussq)
        print("updated to (with same data): r=%e nc=%d" %
              (mock.region.maxradiussq, nclusters))
        smallest_cluster = min(
            (nextTransformLayer.clusterids == cid).sum()
            for cid in np.unique(nextTransformLayer.clusterids))
        assert smallest_cluster > 1, ("found lonely points", i, nclusters,
                                      np.unique(mock.transformLayer.clusterids,
                                                return_counts=True))

        nextTransformLayer = mock.transformLayer.create_new(
            data['u'], mock.region.maxradiussq)
        nclusters = nextTransformLayer.nclusters
        print("updated to (with new data): r=%e nc=%d" %
              (mock.region.maxradiussq, nclusters))
        smallest_cluster = min(
            (nextTransformLayer.clusterids == cid).sum()
            for cid in np.unique(nextTransformLayer.clusterids))
        if smallest_cluster > 1:
            # this happens because mock.region.maxradiussq is not valid anymore
            # when nlive changes
            print(
                "found lonely points", i, nclusters,
                np.unique(mock.transformLayer.clusterids, return_counts=True))

        if False:
            for xi0, yi0, xi, yi in zip(data['u0'][:, 0], data['u0'][:, 1],
                                        data['u'][:, 0], data['u'][:, 1]):
                plt.plot([xi0, xi], [yi0, yi], 'x-', ms=2)

            plt.savefig('testoverclustering_eggbox_%d_diff.pdf' % i,
                        bbox_inches='tight')
            plt.close()
        print(" --- end ---")

        if len(data['u']) < nsamples or True:
            # maxradius has to be invalidated if live points change
            print("setting maxradiussq to None")
            mock.region.maxradiussq = None

        updated = ReactiveNestedSampler._update_region(mock, data['u'],
                                                       data['u'])
        nclusters = mock.transformLayer.nclusters
        print("transitioned to : r=%e nc=%d %s" %
              (mock.region.maxradiussq, nclusters, updated))
        smallest_cluster = min(
            (mock.transformLayer.clusterids == cid).sum()
            for cid in np.unique(mock.transformLayer.clusterids))
        if smallest_cluster == 1:
            print("found lonely points")
        for k in np.unique(mock.transformLayer.clusterids):
            x, y = mock.region.u[mock.transformLayer.clusterids ==
                                 k].transpose()
            print('cluster %d/%d: %d points @ %.5f +- %.5f , %.5f +- %.5f' %
                  (k, nclusters, len(x), x.mean(), x.std(), y.mean(), y.std()))
        if False:
            plt.title('nclusters: %d' % nclusters)
            for k in np.unique(mock.transformLayer.clusterids):
                x, y = mock.region.u[mock.transformLayer.clusterids ==
                                     k].transpose()
                plt.scatter(x, y, s=2)

            plt.savefig('testoverclustering_eggbox_%d.pdf' % i,
                        bbox_inches='tight')
            plt.close()
        # final clustering: known-good range, no single-point clusters
        assert 14 < nclusters < 20, (nclusters, i)
        assert smallest_cluster > 1, (i, nclusters,
                                      np.unique(mock.transformLayer.clusterids,
                                                return_counts=True))
Esempio n. 17
0
def main(args):
    """Run a shifted eggbox problem with the reactive or plain nested sampler."""
    def loglike(z):
        # eggbox modes from the first two dimensions...
        chi = (2. + (cos(z[:, :2] / 2.)).prod(axis=1))**5
        # ...plus a Laplace-like pull towards 5*pi in every dimension
        chi2 = -np.abs((z - 5 * pi) / 0.5).sum(axis=1)
        return chi + chi2

    def transform(x):
        # map unit cube to [0, 100]
        return x * 100

    import string
    paramnames = list(string.ascii_lowercase)[:args.x_dim]

    if args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=args.log_dir +
                                        'RNS-%dd' % args.x_dim,
                                        resume=True,
                                        vectorized=True)
        #log_dir=None)
        sampler.run(log_interval=20, min_num_live_points=args.num_live_points)
        sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '%dd' % args.x_dim,
                                resume=True)
        #log_dir=None)
        sampler.run(log_interval=20)
        sampler.plot()
Esempio n. 18
0
def main(args):
    """Benchmark a hyper-box likelihood (flat inside, falling with enclosed volume).

    Dispatches to PyMultiNest, the reactive sampler (optionally with slice or
    harmonic step samplers), or the plain nested sampler, based on args.
    """
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    def flat_loglike(theta):
        # scalar variant: one parameter vector per call
        delta = np.max(np.abs(theta - 0.5))
        volume_enclosed = ndim * np.log(delta + 1e-15)
        if volume_enclosed > -100:
            return -volume_enclosed
        else:
            return +100

    def loglike(theta):
        # vectorized variant: likelihood rises as the enclosing box shrinks,
        # clamped at +100 (also catches non-finite values)
        delta = np.max(np.abs(theta - 0.5), axis=1)
        volume_enclosed = ndim * np.log(delta + 1e-15)
        like = -volume_enclosed
        like[~(like < +100)] = 100
        return like

    def flat_transform(x):
        return x
    def transform(x):
        return x

    paramnames = ['param%d' % (i+1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform, 
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            verbose=True, resume=True, importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        # encode the step-sampler choice in the output directory name
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform, 
            log_dir=log_dir, resume=True,
            vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        """
        if args.dyhmc:
            import ultranest.dyhmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
            sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps, 
                transform_loglike_gradient=transform_loglike_gradient)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient, verbose=True)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps, 
                transform=transform, loglike=loglike, gradient=gradient)
        """
        sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform, 
            num_live_points=args.num_live_points, vectorized=True,
            log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
Esempio n. 19
0
def test_hotstart():
    """Check warm-start helpers: auxiliary-problem sampling and sample reuse.

    Uses module-level fixtures defined elsewhere in this file:
    ``parameters``, ``prior_transform``, ``log_likelihood``,
    ``get_extended_auxiliary_problem``, ``ReactiveNestedSampler``,
    ``reuse_samples`` and ``log10``.
    """
    np.random.seed(2)
    # Guessed posterior centre in unit-cube coordinates; the scalings mirror
    # prior_transform (param 0 linear over [-1000, 1000], param 1 log10-scaled)
    # -- TODO confirm against the module-level prior_transform.
    ctr = np.array([(42.0 + 1000) / 2000, (log10(0.1) + 2) / 4])
    # diagonal guess covariance in the same unit-cube coordinates
    cov = np.diag([0.01 / 2000, (log10(0.1) + 2) / 4 - (log10(0.09) + 2) / 4])**2
    invcov = np.linalg.inv(cov)

    # a random prior draw should be much worse than the guessed centre
    Lguess = log_likelihood(prior_transform(np.random.uniform(size=len(parameters))))
    Lctr = log_likelihood(prior_transform(ctr))
    print(Lguess, Lctr)
    assert Lguess < Lctr - 100, (Lguess, Lctr)

    # build the zoomed-in auxiliary problem around the guess
    aux_log_likelihood, aux_transform = get_extended_auxiliary_problem(
            log_likelihood, prior_transform, ctr, invcov, 
            enlargement_factor=len(parameters)**0.5, df=20)

    # Draw proposals through the auxiliary transform; the extra last column
    # is presumably the auxiliary log-weight, with -1e100 flagging invalid
    # draws -- verify against get_extended_auxiliary_problem.
    proposals = np.array([aux_transform(np.random.uniform(size=len(parameters))) for i in range(40)])
    valid = proposals[:,2] > -1e100
    # NOTE(review): this compares a *count* to 0.9, i.e. only requires >= 1
    # valid draw; `valid.mean() > 0.9` may have been intended -- confirm.
    assert valid.sum() > 0.9, valid.sum()
    proposals = proposals[valid,:]
    print("proposals:", proposals, valid.sum())
    # most proposals should land near the guessed parameter values
    assert (np.abs(proposals[:,0] - 42) < 2).mean() > 0.9, proposals
    assert (np.abs(log10(proposals[:,1] / 0.1)) < 0.5).mean() > 0.9, proposals
    Lproposed = np.array([log_likelihood(p[:-1]) for p in proposals])
    assert np.mean(Lproposed > Lctr - 10) > 0.5, (Lproposed, Lctr)

    # warm-started run on the auxiliary (zoomed-in) problem
    aux_sampler = ReactiveNestedSampler(
        parameters, aux_log_likelihood, transform=aux_transform,
        derived_param_names=['aux_logweight'],
    )
    aux_results = aux_sampler.run(frac_remain=0.5, viz_callback=None)
    aux_sampler.print_results()

    # reference run on the original problem, for comparison
    ref_sampler = ReactiveNestedSampler(
        parameters, log_likelihood, transform=prior_transform,
    )
    ref_results = ref_sampler.run(frac_remain=0.5, viz_callback=None)
    ref_sampler.print_results()

    # warm start must be much cheaper yet statistically consistent
    assert aux_results['ncall'] < ref_results['ncall'] / 4, (ref_results['ncall'], aux_results['ncall'])
    assert np.abs(ref_results['posterior']['mean'][0] - aux_results['posterior']['mean'][0]) < 0.5, (ref_results['posterior'], aux_results['posterior'])
    assert np.abs(ref_results['posterior']['mean'][1] - aux_results['posterior']['mean'][1]) < 0.05, (ref_results['posterior'], aux_results['posterior'])
    assert 0.8 < (ref_results['posterior']['stdev'][0] / aux_results['posterior']['stdev'][0]) < 1.2, (ref_results['posterior'], aux_results['posterior'])
    assert 0.8 < (ref_results['posterior']['stdev'][1] / aux_results['posterior']['stdev'][1]) < 1.2, (ref_results['posterior'], aux_results['posterior'])
    assert np.abs(ref_results['logzerr'] - aux_results['logzerr']) < 0.5, (ref_results['logzerr'], aux_results['logzerr'])

    print("RECYCLING:")
    print("ref:", ref_results)
    # recycle the reference run's weighted samples without new evaluations
    rec_results = reuse_samples(parameters, log_likelihood, **ref_results['weighted_samples'], **ref_results)
    #assert rec_results['ncall'] < ref_results['ncall'] / 4, (ref_results['ncall'], rec_results['ncall'])
    assert np.abs(ref_results['posterior']['mean'][0] - rec_results['posterior']['mean'][0]) < 0.5, (ref_results['posterior'], rec_results['posterior'])
    assert np.abs(ref_results['posterior']['mean'][1] - rec_results['posterior']['mean'][1]) < 0.05, (ref_results['posterior'], rec_results['posterior'])
    assert 0.8 < (ref_results['posterior']['stdev'][0] / rec_results['posterior']['stdev'][0]) < 1.2, (ref_results['posterior'], rec_results['posterior'])
    assert 0.8 < (ref_results['posterior']['stdev'][1] / rec_results['posterior']['stdev'][1]) < 1.2, (ref_results['posterior'], rec_results['posterior'])
    assert np.abs(ref_results['logzerr'] - rec_results['logzerr']) < 0.5, (ref_results['logzerr'], rec_results['logzerr'])
    print("rec:", rec_results)
    del rec_results
    # recycle equally-weighted samples, re-evaluating the likelihood ourselves
    logls = np.array([log_likelihood(s) for s in ref_results['samples']])
    rec_results2 = reuse_samples(parameters, log_likelihood, points=ref_results['samples'], logl=logls)
    print("rec2:", rec_results2)
    assert rec_results2['ncall'] == len(logls), (ref_results['ncall'], rec_results2['ncall'])
    assert np.abs(ref_results['posterior']['mean'][0] - rec_results2['posterior']['mean'][0]) < 0.5, (ref_results['posterior'], rec_results2['posterior'])
    assert np.abs(ref_results['posterior']['mean'][1] - rec_results2['posterior']['mean'][1]) < 0.05, (ref_results['posterior'], rec_results2['posterior'])
    assert 0.5 < (ref_results['posterior']['stdev'][0] / rec_results2['posterior']['stdev'][0]) < 1.5, (ref_results['posterior'], rec_results2['posterior'])
    assert 0.5 < (ref_results['posterior']['stdev'][1] / rec_results2['posterior']['stdev'][1]) < 1.5, (ref_results['posterior'], rec_results2['posterior'])
Esempio n. 20
0
def main(args):
    """Benchmark samplers on the (vectorized) Rosenbrock log-likelihood.

    Depending on the flags in *args*, runs pymultinest, the
    ReactiveNestedSampler (optionally with slice / harmonic-slice / HMC /
    constrained-HMC step samplers) or the plain NestedSampler.
    """
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    def loglike(theta):
        """Vectorized Rosenbrock log-likelihood; theta has shape (n, ndim)."""
        a = theta[:, :-1]
        b = theta[:, 1:]
        return -2 * (100 * (b - a**2)**2 + (1 - a)**2).sum(axis=1)

    def transform(u):
        """Map the unit hypercube to the flat prior [-10, 10]^ndim."""
        return u * 20 - 10

    # chain-rule scale factor of the affine `transform` above (dtheta/du = 20),
    # applied to gradients computed in theta-space
    prior_factor = 20

    def _grad_theta(theta):
        """Gradient of loglike w.r.t. a single (non-vectorized) theta.

        BUGFIX: start from zeros. The loop below *assigns* grad[i] only for
        i < ndim-1 and *accumulates* (+=) for i > 0; initializing with
        theta.copy() left the raw parameter value inside the last component.
        """
        grad = np.zeros_like(theta)
        for i in range(ndim):
            a = theta[i]
            if i < ndim - 1:
                b = theta[i + 1]
                # d/da of -2*(100*(b - a^2)^2 + (1 - a)^2)
                grad[i] = -2 * (-400 * a * (b - a**2) - 2 * (1 - a))
            if i > 0:
                c = theta[i - 1]
                # d/db contribution from the (i-1, i) Rosenbrock pair
                grad[i] += -400 * (a - c**2)
        return grad

    def transform_loglike_gradient(u):
        """Return (theta, loglike(theta), gradient w.r.t. u) for one point."""
        theta = u * 20 - 10
        a = theta[:-1]
        b = theta[1:]
        L = -2 * (100 * (b - a**2)**2 + (1 - a)**2).sum()
        return theta, L, _grad_theta(theta) * prior_factor

    def gradient(u):
        """Gradient of loglike w.r.t. the unit-cube coordinates u."""
        theta = u * 20 - 10
        return _grad_theta(theta) * prior_factor

    paramnames = ['param%d' % (i + 1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            # pymultinest passes one point at a time; wrap for the vectorized loglike
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike,
                       Prior=transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        # encode the chosen step sampler and its settings in the output path
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim,
                                                          args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim,
                                                         args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim,
                                                          args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim,
                                                           args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=log_dir,
                                        resume=True,
                                        vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.dyhmc:
            import ultranest.dyhmc
            from ultranest.utils import verify_gradient
            # sanity-check the analytic gradient before trusting HMC with it
            verify_gradient(ndim,
                            transform,
                            loglike,
                            transform_loglike_gradient,
                            combination=True)
            sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(
                ndim=ndim,
                nsteps=args.slice_steps,
                transform_loglike_gradient=transform_loglike_gradient,
                adaptive_nsteps=adaptive_nsteps)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(
                ndim=ndim,
                nsteps=args.slice_steps,
                transform=transform,
                loglike=loglike,
                gradient=gradient,
                adaptive_nsteps=adaptive_nsteps)
        sampler.run(frac_remain=0.5,
                    min_num_live_points=args.num_live_points,
                    max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename=log_dir +
                                     '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim,
                                resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
Esempio n. 21
0
def main(args):
    """Two-shells benchmark: a likelihood made of two thin Gaussian shells.

    The analytic evidence is printed at the end for comparison. Relies on
    module-level names defined earlier in the file: np, log, pi, shell_vol.
    """
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    #C = 0.01
    r = 0.2
    # the shell thickness is 
    #w = (r**(ndim+1) + C * scipy.special.gamma((ndim+3)/2)*ndim*pi**(-(ndim+1)/2) / (
    #        scipy.special.gamma((ndim+2)/2) * pi**(-ndim/2)))**(1 / (ndim+1)) - r
    w = 0.001 / ndim

    # two shells of equal radius and width, shifted apart along the first axis
    r1, r2 = r, r
    w1, w2 = w, w
    c1, c2 = np.zeros(ndim) + 0.5, np.zeros(ndim) + 0.5
    c1[0] -= r1 / 2
    c2[0] += r2 / 2
    # Gaussian normalization constants of each shell's radial profile
    N1 = -0.5 * log(2 * pi * w1**2)
    N2 = -0.5 * log(2 * pi * w2**2)
    Z_analytic = log(shell_vol(ndim, r1, w1) + shell_vol(ndim, r2, w2))

    def loglike(theta):
        """Vectorized log-likelihood: logsumexp of the two shell profiles."""
        d1 = ((theta - c1)**2).sum(axis=1)**0.5
        d2 = ((theta - c2)**2).sum(axis=1)**0.5
        L1 = -0.5 * ((d1 - r1)**2) / w1**2 + N1
        L2 = -0.5 * ((d2 - r2)**2) / w2**2 + N2
        return np.logaddexp(L1, L2)

    def transform(x):
        # the prior is the unit cube itself
        return x

    def gradient(theta):
        """Per-shell gradient terms at a single point theta."""
        delta1 = theta - c1
        # BUGFIX: the second shell must be measured from its own centre c2
        # (previously `theta - c1`, which duplicated the first shell).
        delta2 = theta - c2
        d1 = (delta1**2).sum()**0.5
        d2 = (delta2**2).sum()**0.5
        g1 = -delta1 * (1 - r1 / d1) / w1**2
        g2 = -delta2 * (1 - r2 / d2) / w2**2
        # NOTE(review): logaddexp of the two gradient vectors is NOT the
        # gradient of logaddexp(L1, L2); the correct combination weights
        # g1/g2 by softmax(L1, L2). Kept as in the original -- check with
        # verify_gradient before relying on dychmc results.
        return np.logaddexp(g1, g2)


    """
    N = 10000
    x = np.random.normal(size=(N, ndim))
    x *= (np.random.uniform(size=N)**(1./ndim) / (x**2).sum(axis=1)**0.5).reshape((-1, 1))
    x = x * r1 + c1
    print(loglike(x) - N1)
    print('%.3f%%' % ((loglike(x) - N1 > -ndim*2).mean() * 100))
    
    import sys; sys.exit()
    """

    paramnames = ['param%d' % (i+1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            # pymultinest passes one point at a time; wrap for the vectorized loglike
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike, Prior=transform, 
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            verbose=True, resume=True, importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        # encode the chosen step sampler and its settings in the output path
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform, 
            log_dir=log_dir, resume=True,
            vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        #if args.dyhmc:
        #    import ultranest.dyhmc
        #    from ultranest.utils import verify_gradient
        #    verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
        #    sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps, 
        #        transform_loglike_gradient=transform_loglike_gradient)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient, verbose=True)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps, 
                transform=transform, loglike=loglike, gradient=gradient)
        sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform, 
            num_live_points=args.num_live_points, vectorized=True,
            log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
    print("expected Z=%.3f (analytic solution)" % Z_analytic)
Esempio n. 22
0
def main(args):
    """Sine-curve fitting demo: simulate noisy periodic data and recover
    amplitude, jitter, phase and period with pymultinest or ultranest.

    Relies on module-level imports (np, plt, and sin/pi/log, presumably
    from numpy -- defined earlier in the file).
    """

    np.random.seed(2)
    Ndata = args.ndata
    jitter_true = 0.1
    phase_true = 0.
    period_true = 180
    # amplitude scaled so that args.contrast controls the signal's detectability
    amplitude_true = args.contrast / Ndata * jitter_true
    paramnames = ['amplitude', 'jitter', 'phase', 'period']
    ndim = 4
    derivednames = [] #'frequency']
    # phase is periodic -> mark it as wrapped so the sampler treats it circularly
    wrapped_params = [False, False, True, False]
    #wrapped_params = None

    # smooth curve of the true model, for plotting only
    x = np.linspace(0, 360, 1000)
    y = amplitude_true * sin(x / period_true * 2 * pi + phase_true)

    if True:
        plt.plot(x, y)
        # NOTE: x and y are re-bound here to the simulated noisy data set;
        # the loglike closure below uses these, not the smooth curve above.
        x = np.random.uniform(0, 360, Ndata)
        y = np.random.normal(amplitude_true * sin(x / period_true * 2 * pi + phase_true), jitter_true)
        plt.errorbar(x, y, yerr=jitter_true, marker='x', ls=' ')
        plt.savefig('testsine.pdf', bbox_inches='tight')
        plt.close()


    def loglike(params):
        """Vectorized Gaussian log-likelihood; params has shape (npoints, 4)."""
        amplitude, jitter, phase, period = params.transpose()[:4]
        predicty = amplitude * sin(x.reshape((-1,1)) / period * 2 * pi + phase)
        logl = (-0.5 * log(2 * pi * jitter**2) - 0.5 * ((predicty - y.reshape((-1,1))) / jitter)**2).sum(axis=0)
        assert logl.shape == jitter.shape
        return logl

    def transform(x):
        """Unit cube -> physical parameters (vectorized).

        amplitude: log-uniform 1e-2..1e2; jitter: log-uniform 10^-1.5..10^-0.5;
        phase: uniform 0..2pi; period: log-uniform 0.1..1000.
        """
        z = np.empty((len(x), 4))
        z[:,0] = 10**(x[:,0] * 4 - 2)
        z[:,1] = 10**(x[:,1] * 1 - 1.5)
        z[:,2] = 2 * pi * x[:,2]
        z[:,3] = 10**(x[:,3] * 4 - 1)
        #z[:,4] = 2 * pi / x[:,3]
        return z

    # smoke-test the closures once before handing them to a sampler
    loglike(transform(np.ones((2, ndim))*0.5))
    if args.pymultinest:
        from pymultinest.solve import solve
        global Lmax
        Lmax = -np.inf

        def flat_loglike(theta):
            # single-point wrapper; also tracks and prints the running maximum
            L = loglike(theta.reshape((1, -1)))[0]
            global Lmax
            if L > Lmax:
                print("Like: %.2f" % L)
                Lmax = L
            return L

        def flat_transform(cube):
            return transform(cube.reshape((1, -1)))[0]

        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform, 
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            n_live_points=args.num_live_points,
            verbose=True, resume=False, importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
        return

    elif args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform, 
            log_dir=args.log_dir, vectorized=True,
            derived_param_names=derivednames, wrapped_params=wrapped_params,
            resume='overwrite')
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform, 
            log_dir=args.log_dir, vectorized=True,
            derived_param_names=derivednames, wrapped_params=wrapped_params,
            resume='overwrite')

    sampler.run(min_num_live_points=args.num_live_points)

    print()
    sampler.plot()

    # posterior mean +- std for each fitted (and derived) parameter
    for i, p in enumerate(paramnames + derivednames):
        v = sampler.results['samples'][:,i]
        print('%20s: %5.3f +- %5.3f' % (p, v.mean(), v.std()))
Esempio n. 23
0
def main(args):
    """Drive ReactiveNestedSampler.run_iter on one of several test problems.

    Problems: 'gauss', 'slantedeggbox', 'funnel', 'loggamma'. Returns the
    sampler results dict, or None when the flag combination is skipped.
    """
    ndim = args.x_dim
    paramnames = list(string.ascii_lowercase)[:ndim]

    np.random.seed(args.seed)
    if args.wrapped_dims:
        wrapped_params = [True] * ndim
    else:
        wrapped_params = None

    # analytic log-evidence, set below where known for the chosen problem
    true_Z = None

    if args.log_dir is None:
        if args.delete_dir:
            return
        log_dir = None
    else:
        # encode problem, dimensionality and wrapping in the output path
        log_dir = args.log_dir + '-%s' % args.problem
        log_dir += '-%dd' % ndim
        if args.wrapped_dims:
            log_dir += '-wrapped'

        if args.delete_dir:
            shutil.rmtree(log_dir, ignore_errors=True)

    if ndim >= 20 and args.num_live_points >= 1000:
        print("skipping, probably too slow to run")
        return

    if args.problem == 'gauss':
        # narrow multivariate Gaussian; true_Z = 0 is the analytic evidence
        sigma = 0.01
        if args.wrapped_dims:
            centers = (np.sin(np.arange(ndim) / 2.) + 1.) / 2.
        else:
            centers = (np.sin(np.arange(ndim) / 2.) / 2. + 1.) / 2.
        true_Z = 0

        def loglike(theta):
            like = -0.5 * (((theta - centers) / sigma)**2).sum(
                axis=1) - 0.5 * np.log(2 * np.pi * sigma**2) * ndim
            return like

        def transform(x):
            return x
    elif args.problem == 'slantedeggbox':
        if not args.pass_transform:
            return

        def loglike(z):
            # eggbox modulation plus a linear slant pulling towards 5*pi
            chi = (2. + (np.cos(z[:, :2] / 2.)).prod(axis=1))**5
            chi2 = -np.abs((z - 5 * np.pi) / 0.5).sum(axis=1)
            return chi + chi2

        def transform(x):
            return x * 100
    elif args.problem == 'funnel':
        if args.wrapped_dims: return
        if not args.pass_transform:
            return

        sigma = 0.01
        centers = np.sin(np.arange(ndim) / 2.)
        data = np.random.normal(centers, sigma).reshape((1, -1))

        def loglike(theta):
            # first parameter sets (in log10) the width used for the rest
            sigma = 10**theta[:, 0]
            like = -0.5 * (((theta[:, 1:] - data) / sigma.reshape(
                (-1, 1)))**2).sum(axis=1) - 0.5 * np.log(
                    2 * np.pi * sigma**2) * ndim
            return like

        def transform(x):
            z = x * 20 - 10
            z[:, 0] = x[:, 0] * 6 - 3
            return z

        paramnames.insert(0, 'sigma')
    elif args.problem == 'loggamma':
        # bimodal mixture of loggamma and normal components; true_Z = 0
        true_Z = 0.0
        if args.wrapped_dims: return
        rv1a = scipy.stats.loggamma(1, loc=2. / 3, scale=1. / 30)
        rv1b = scipy.stats.loggamma(1, loc=1. / 3, scale=1. / 30)
        rv2a = scipy.stats.norm(2. / 3, 1. / 30)
        rv2b = scipy.stats.norm(1. / 3, 1. / 30)
        # remaining dimensions alternate between loggamma and normal factors
        rv_rest = []
        for i in range(2, ndim):
            if i <= (ndim + 2) / 2:
                rv = scipy.stats.loggamma(1, loc=2. / 3., scale=1. / 30)
            else:
                rv = scipy.stats.norm(2. / 3, 1. / 30)
            rv_rest.append(rv)
            del rv

        def loglike(theta):
            L1 = np.log(0.5 * rv1a.pdf(theta[:, 0]) +
                        0.5 * rv1b.pdf(theta[:, 0]))
            L2 = np.log(0.5 * rv2a.pdf(theta[:, 1]) +
                        0.5 * rv2b.pdf(theta[:, 1]))
            Lrest = np.sum([
                rv.logpdf(t)
                for rv, t in zip(rv_rest, theta[:, 2:].transpose())
            ],
                           axis=0)
            like = L1 + L2 + Lrest
            # floor very low likelihoods with a smooth ramp, presumably to
            # keep the sampler numerically stable -- confirm intent
            like = np.where(like < -300,
                            -300 - ((np.asarray(theta) - 0.5)**2).sum(), like)
            assert like.shape == (len(theta), ), (like.shape, theta.shape)
            return like

        def transform(x):
            return x

    from ultranest import ReactiveNestedSampler
    sampler = ReactiveNestedSampler(
        paramnames,
        loglike,
        transform=transform if args.pass_transform else None,
        log_dir=log_dir,
        vectorized=True,
        resume='resume' if args.resume else 'overwrite',
        wrapped_params=wrapped_params,
    )
    # run_iter yields intermediate results after each improvement loop
    for result in sampler.run_iter(
            update_interval_iter_fraction=args.update_interval_iter_fraction,
            dlogz=args.dlogz,
            dKL=args.dKL,
            frac_remain=args.frac_remain,
            min_ess=args.min_ess,
            max_iters=args.max_iters,
            cluster_num_live_points=args.cluster_num_live_points,
            min_num_live_points=args.num_live_points,
            max_ncalls=int(args.max_ncalls),
    ):
        sampler.print_results()
        print(" (remember, we are trying to achive: %s ) " % (dict(
            dlogz=args.dlogz,
            dKL=args.dKL,
            frac_remain=args.frac_remain,
            min_ess=args.min_ess,
        )))

    results = sampler.results
    sampler.plot()
    sampler.pointstore.close()
    # only compare logz to the analytic value for well-converged runs
    if results['logzerr_tail'] < 0.5 and results[
            'logzerr'] < 1.0 and true_Z is not None and args.num_live_points > 50:
        assert results['logz'] - results['logzerr'] * 3 < true_Z < results[
            'logz'] + results['logzerr'] * 3
    return results
# data is assumed to be an (M, N) array loaded earlier in the file:
# M measurements for each of N objects -- TODO confirm against the loader above
M, N = data.shape
assert N == 100

# hierarchical bayesian model: population mean/std plus an outlier fraction
params = ['mean', 'std', 'frac_outliers']
def prior_transform(cube):
    """Map unit-cube coordinates onto the model parameter space.

    Index 0: mean, uniform on [0, 100].
    Index 1: std, log-uniform on [1, 100].
    Index 2: frac_outliers, log-uniform on [1e-3, 1e-1].
    """
    out = cube.copy()
    out[0] = 100 * cube[0]
    out[1] = 10**(2 * cube[1])
    out[2] = 10**(-1 - 2 * cube[2])
    return out

def loglikelihood(params):
    """Log-likelihood of the data under a single-Gaussian population model.

    For each of the N objects, the Gaussian density is averaged over the M
    measurements (rows of the module-level ``data``), and the log of that
    average is summed over objects. ``frac_outliers`` is unpacked but the
    outlier mixture component is currently disabled.
    """
    mean, std, frac_outliers = params
    z = (data - mean) / std
    # NOTE(review): normalization term is log(2*pi*std), not
    # 0.5*log(2*pi*std**2); kept as-is to preserve behavior -- confirm intended
    prob = exp(-0.5 * z**2 - log(2 * pi * std))
    avg = prob.mean(axis=0)
    assert avg.shape == (N,)
    # the 1e-100 floor guards against taking log(0)
    return log(1e-100 + avg).sum()

from ultranest import ReactiveNestedSampler

# fit the hierarchical model defined above; 'overwrite' discards any
# previous run stored under this log directory ('prefix' is defined earlier)
sampler = ReactiveNestedSampler(params, loglikelihood, transform=prior_transform,
	log_dir=prefix + '_gauss_outliers', resume='overwrite')
sampler.run()
sampler.plot()






Esempio n. 25
0
def test_reactive_run_resume_eggbox(storage_backend):
    """Run the eggbox problem twice with resume=True and verify that call
    accounting and the on-disk point store stay consistent with read_file().
    """
    from ultranest import ReactiveNestedSampler
    from ultranest import read_file

    def loglike(z):
        # vectorized eggbox likelihood; also counts evaluations for bookkeeping
        chi = (np.cos(z / 2.)).prod(axis=1)
        loglike.ncalls += len(z)
        return (2. + chi)**5
    loglike.ncalls = 0

    def transform(x):
        # unit cube -> [0, 10*pi] in each dimension
        return x * 10 * np.pi

    paramnames = ['a', 'b']
    ndim = len(paramnames)

    #last_results = None
    folder = tempfile.mkdtemp()
    np.random.seed(1)
    try:
        for i in range(2):
            print()
            print("====== Running Eggbox problem [%d] =====" % (i+1))
            print()
            # the second iteration resumes from the points stored by the first
            sampler = ReactiveNestedSampler(paramnames,
                loglike, transform=transform,
                log_dir=folder, resume=True, vectorized=True, draw_multiple=False,
                storage_backend=storage_backend)
            initial_ncalls = int(sampler.ncall)
            num_live_points = 100
            loglike.ncalls = 0
            r = sampler.run(max_iters=200 + i*200,
                max_num_improvement_loops=0,
                min_num_live_points=num_live_points,
                cluster_num_live_points=0)
            sampler.print_results()
            if storage_backend == 'hdf5':
                print("pointstore:", sampler.pointstore.fileobj['points'].shape)
            sampler.pointstore.close()
            print(loglike.ncalls, r['ncall'], initial_ncalls)

            # with MPI, sum the per-rank call counts before comparing
            ncalls = loglike.ncalls
            if sampler.mpi_size > 1:
                ncalls = sampler.comm.gather(ncalls, root=0)
                if sampler.mpi_rank == 0:
                    print("ncalls on the different MPI ranks:", ncalls)
                ncalls = sum(sampler.comm.bcast(ncalls, root=0))
            ncalls = ncalls + initial_ncalls
            assert abs(r['ncall'] - ncalls) <= 2 * sampler.mpi_size, (i, r['ncall'], ncalls, r['ncall'] - ncalls)
            assert paramnames == r['paramnames'], 'paramnames should be in results'

        # the results are not exactly the same, because the sampling adds
        #ncalls = loglike.ncalls
        #sampler = ReactiveNestedSampler(paramnames,
        #    loglike, transform=transform,
        #    log_dir=folder, resume=True, vectorized=True, num_test_samples=0)
        #print("pointstore:", sampler.pointstore.fileobj['points'].shape)
        #assert ncalls == loglike.ncalls, (ncalls, loglike.ncalls)
        if storage_backend == 'hdf5':
            # re-read the run from disk and compare against in-memory results
            sequence, results = read_file(folder, ndim, random=False, num_bootstraps=0)

            print("sampler results: ********************")
            print({k:v for k, v in r.items() if np.asarray(v).size < 20 and k != 'weighted_samples'})
            print("reader results: ********************")
            print({k:v for k, v in results.items() if np.asarray(v).size < 20 and k != 'weighted_samples'})
            for k, v in results.items():
                if k == 'posterior' or k == 'samples':
                    pass
                elif k == 'weighted_samples' or k == 'maximum_likelihood':
                    for k2, v2 in results[k].items():
                        if k2 == 'bootstrapped_weights': continue
                        print("  ", k, "::", k2, np.shape(v2))
                        assert_allclose(r[k][k2], v2)
                elif k.startswith('logzerr') or '_bs' in k or 'Herr' in k:
                    # error estimates involve randomness; skip exact comparison
                    print("   skipping", k, np.shape(v))
                    #assert_allclose(r[k], v, atol=0.5)
                elif k == 'insertion_order_MWW_test':
                    print('insertion_order_MWW_test:', r[k], v)
                    assert r[k] == v, (r[k], v)
                else:
                    print("  ", k, np.shape(v))
                    assert_allclose(r[k], v)

            logw = r['weighted_samples']['logw']
            v = r['weighted_samples']['points']
            L = r['weighted_samples']['logl']

            # cross-check the reader's iteration sequence against the results
            assert sequence['logz'][-1] - r['logz'] < 0.5, (results['logz'][-1], r['logz'])
            assert sequence['logzerr'][-1] <= r['logzerr_single'], (results['logzerr'][-1], r['logzerr'])
            #assert_allclose(sequence['logz_final'], r['logz_single'], atol=0.3)
            #assert_allclose(sequence['logzerr_final'], r['logzerr_single'], atol=0.1)
            assert r['niter'] <= sequence['niter'] <= r['niter'], (sequence['niter'], r['niter'])
            assert results['niter'] == len(sequence['logz']) == len(sequence['logzerr']) == len(sequence['logvol']) == len(sequence['logwt'])
            assert results['niter'] == len(results['samples'])
            # the text chain file must match the stored weighted samples
            data = np.loadtxt(folder + '/chains/weighted_post.txt', skiprows=1)
            assert_allclose(data[:,0], results['weighted_samples']['weights'])
            assert_allclose(data[:,1], results['weighted_samples']['logl'])
            assert_allclose(v, results['weighted_samples']['points'])
            assert_allclose(logw, results['weighted_samples']['logw'])
            assert_allclose(L, results['weighted_samples']['logl'])

            assert_allclose(L, sequence['logl'])
            #assert_allclose(logw + L, sequence['logwt'])
            assert sequence['logvol'].shape == logw.shape == (len(L),), (sequence['logvol'].shape, logw.shape)
            assert sequence['logwt'].shape == logw.shape == (len(L),), (sequence['logwt'].shape, logw.shape)
            #assert_allclose(logw, sequence['logvols'])
            #assert results['samples_untransformed'].shape == v.shape == (len(L), ndim), (results['samples_untransformed'].shape, v.shape)

    finally:
        # always clean up the temporary run directory
        shutil.rmtree(folder, ignore_errors=True)
Esempio n. 26
0
    params[0] = cube[0] * (hi - lo) + lo
    # let slope go from 12.0 to 15.0
    lo = 12.0
    hi = 15.0
    params[1] = cube[1] * (hi - lo) + lo
    # let slope go from 1e-8 to 2.0
    lo = 1e-8
    hi = 2.0
    params[2] = cube[2] * (hi - lo) + lo
    # let slope go from 1e-8 to 2.0
    lo = 1e-8
    hi = 2.0
    params[3] = cube[3] * (hi - lo) + lo
    # let slope go from 1e-8 to 2.0
    lo = 1e-8
    hi = 2.0
    params[4] = cube[4] * (hi - lo) + lo
    return params


def log_likelihood(par):
    """Gaussian log-likelihood: -0.5 * r^T C^-1 r for residual vector r.

    Compares the observed signal ``wp_data`` against the model prediction
    ``wp_model(par)`` under the covariance ``wp_cov`` (module-level globals).
    """
    # unpack for readability; the individual values are not used here,
    # but unpacking also checks that `par` has exactly five entries
    M_cut, M1, sigma, kappa, alpha = par
    residual = wp_data - wp_model(par)
    chi2 = np.dot(residual, np.linalg.solve(wp_cov, residual))
    return -0.5 * chi2


# Sample the posterior with 400 live points; dKL=np.inf presumably disables
# the KL-divergence stopping criterion, and min_ess=100 demands at least 100
# effective posterior samples -- confirm against the ultranest run() docs.
sampler = ReactiveNestedSampler(parameters, log_likelihood, prior_transform)

result = sampler.run(min_num_live_points=400, dKL=np.inf, min_ess=100)

sampler.print_results()
Esempio n. 27
0
def test_reactive_run_warmstart_gauss():
    """Verify warm-starting from a stored run needs far fewer likelihood calls."""
    from ultranest import ReactiveNestedSampler
    from ultranest import read_file
    center = 0

    def loglike(z):
        # very narrow Gaussian around the current `center` (read via closure)
        loglike.ncalls += len(z)
        return -0.5 * (((z - center) / 0.001)**2).sum(axis=1)
    loglike.ncalls = 0

    def transform(x):
        return x * 20000 - 10000

    paramnames = ['a']
    ndim = len(paramnames)

    workdir = tempfile.mkdtemp()
    np.random.seed(1)
    first_ncalls = None
    resume_ncalls = None
    try:
        for step, mode in enumerate(['overwrite', 'resume', 'resume-similar']):
            print()
            print("====== Running Gauss problem [%d] =====" % (step + 1))
            print()
            # shift the mode slightly each round, so the stored likelihood differs
            center = (step + 1) * 1e-4
            try:
                sampler = ReactiveNestedSampler(
                    paramnames, loglike, transform=transform,
                    log_dir=workdir, resume=mode, vectorized=True,
                    draw_multiple=False, warmstart_max_tau=0.5)
            except Exception as e:
                # plain 'resume' must refuse a changed likelihood; any other
                # mode failing here is a real error
                if mode != 'resume':
                    raise e
                assert 'loglikelihood function changed' in str(e), e
                print("Exception as expected:", e)
                continue
            stored_ncalls = int(sampler.ncall)
            if step == 0:
                # a fresh run starts with an empty point store
                assert stored_ncalls == 0
            loglike.ncalls = 0
            result = sampler.run(
                max_num_improvement_loops=0,
                min_num_live_points=100,
                cluster_num_live_points=0,
                viz_callback=None,
                frac_remain=0.5)
            sampler.print_results()
            print("pointstore:", sampler.pointstore.fileobj['points'].shape)
            sampler.pointstore.close()
            print(loglike.ncalls, result['ncall'], stored_ncalls)

            call_count = loglike.ncalls
            if sampler.mpi_size > 1:
                # aggregate evaluation counts across all MPI ranks
                call_count = sampler.comm.gather(call_count, root=0)
                if sampler.mpi_rank == 0:
                    print("ncalls on the different MPI ranks:", call_count)
                call_count = sum(sampler.comm.bcast(call_count, root=0))
            call_count = call_count + stored_ncalls
            if step == 0:
                first_ncalls = call_count
            if step == 2:
                resume_ncalls = loglike.ncalls
            assert abs(result['ncall'] - call_count) <= 2 * sampler.mpi_size, (
                step, result['ncall'], call_count, result['ncall'] - call_count)
            assert paramnames == result['paramnames'], 'paramnames should be in results'

    finally:
        shutil.rmtree(workdir, ignore_errors=True)

    # make sure warm start is much faster
    assert resume_ncalls < first_ncalls - 800, (resume_ncalls, first_ncalls)
Esempio n. 28
0
def main(args):
    """Run an eggbox-like multimodal problem in args.x_dim dimensions."""
    def loglike(z):
        # product of per-axis cosines creates the periodic mode pattern
        return (2. + cos(z / 2.).prod(axis=1))**5

    def transform(x):
        # stretch the unit cube to [0, 10*pi] on every axis
        return x * 10 * pi

    import string
    paramnames = list(string.ascii_lowercase)[:args.x_dim]

    if not args.reactive:
        from ultranest import NestedSampler
        sampler = NestedSampler(
            paramnames,
            loglike,
            transform=transform,
            num_live_points=args.num_live_points,
            vectorized=True,
            log_dir=args.log_dir,
            resume='overwrite')
        #log_dir=None)
        sampler.run(log_interval=20)
        sampler.print_results()
        sampler.plot()
        return

    from ultranest import ReactiveNestedSampler
    sampler = ReactiveNestedSampler(
        paramnames,
        loglike,
        transform=transform,
        log_dir=args.log_dir,
        resume='overwrite',
        draw_multiple=False,
        vectorized=True,
    )
    sampler.run(
        log_interval=20,
        max_num_improvement_loops=10,
        min_num_live_points=args.num_live_points,
    )
    sampler.print_results()
    sampler.plot()
Esempio n. 29
0
def main(args):
    """Benchmark samplers on an anisotropic Gaussian problem.

    Builds a Gaussian likelihood with per-axis widths log-spaced from 0.1
    down to ``args.sigma`` and centers spread across the unit cube, then
    solves it with the backend selected by the command-line flags:
    PyMultiNest, ultranest's ReactiveNestedSampler (optionally with slice,
    harmonic-ball-slice, dynamic HMC, or constrained HMC step samplers),
    or the plain ultranest NestedSampler.
    """
    ndim = args.x_dim
    # per-axis standard deviations, log-spaced from 1e-1 down to args.sigma
    # (the dead `sigma = args.sigma` assignment was removed: it was
    # immediately overwritten by this logspace call)
    sigma = np.logspace(-1, np.log10(args.sigma), ndim)
    # keep the mode centers inside the prior, away from the borders
    width = 1 - 5 * sigma
    width[width < 1e-20] = 1e-20
    centers = (np.sin(np.arange(ndim) / 2.) * width + 1.) / 2.
    #centers = np.ones(ndim) * 0.5

    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    def loglike(theta):
        # vectorized independent-Gaussian log-density; theta is (batch, ndim)
        like = -0.5 * (((theta - centers) / sigma)**2).sum(
            axis=1) - 0.5 * np.log(2 * np.pi * sigma**2).sum()
        return like

    def transform(x):
        # the prior is the unit cube itself
        return x

    def transform_loglike_gradient(u):
        # combined transform + likelihood + gradient, as dyhmc expects
        theta = u
        like = -0.5 * (((theta - centers) / sigma)**2).sum(
            axis=1) - 0.5 * np.log(2 * np.pi * sigma**2).sum()
        # NOTE(review): the analytic gradient of `like` would be
        # -(theta - centers) / sigma**2; verify_gradient() below should
        # catch any mismatch -- confirm the intended sign/scale convention.
        grad = (theta - centers) / sigma
        return u, like, grad

    def gradient(theta):
        # see NOTE(review) in transform_loglike_gradient about sign/scale
        return (theta - centers) / sigma

    paramnames = ['param%d' % (i + 1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            # pymultinest passes a single point; wrap it as a batch of one
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike,
                       Prior=transform,
                       n_dims=ndim,
                       outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True,
                       resume=True,
                       importance_nested_sampling=False)

        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))

    elif args.reactive:
        # encode the chosen step sampler in the output directory name
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim,
                                                          args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim,
                                                         args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim,
                                                          args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim,
                                                           args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames,
                                        loglike,
                                        transform=transform,
                                        log_dir=log_dir,
                                        resume=True,
                                        vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(
                nsteps=args.slice_steps,
                adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.dyhmc:
            import ultranest.dyhmc
            from ultranest.utils import verify_gradient
            # check the combined transform+loglike+gradient implementation
            verify_gradient(ndim,
                            transform,
                            loglike,
                            transform_loglike_gradient,
                            combination=True)
            sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(
                ndim=ndim,
                nsteps=args.slice_steps,
                transform_loglike_gradient=transform_loglike_gradient,
                adaptive_nsteps=adaptive_nsteps)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(
                ndim=ndim,
                nsteps=args.slice_steps,
                transform=transform,
                loglike=loglike,
                gradient=gradient,
                adaptive_nsteps=adaptive_nsteps)
        sampler.run(frac_remain=0.5,
                    min_num_live_points=args.num_live_points,
                    max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename=log_dir +
                                     '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames,
                                loglike,
                                transform=transform,
                                num_live_points=args.num_live_points,
                                vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim,
                                resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()