def prior_transform_polychord(cube):
    """
    A function defining the transform between the parameterisation in the
    unit hypercube and the true parameters.

    Args:
        cube (array, list): a list containing the parameters as drawn from
            a unit hypercube.

    Returns:
        list: the transformed parameters.
    """
    # mprime, cprime = cube  # unpack the parameters (in their unit hypercube form)
    mprime = cube[0]
    cprime = cube[1]

    cmin = -10.   # lower bound on uniform prior on c
    cmax = 10.    # upper bound on uniform prior on c

    mmu = 0.      # mean of Gaussian prior on m
    msigma = 10.  # standard deviation of Gaussian prior on m

    m = mmu + msigma * ndtri(mprime)      # convert back to m
    c = UniformPrior(cmin, cmax)(cprime)  # convert back to c using UniformPrior class

    theta = [m, c]

    return theta
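For reference, a minimal standalone sketch of the same transform (assuming, as in pypolychord.priors, that UniformPrior(a, b)(x) reduces to the affine map a + (b - a) * x), showing how a draw from the unit hypercube maps to (m, c); the helper name is hypothetical:

from scipy.special import ndtri  # inverse CDF of the standard normal

def prior_transform_sketch(cube):
    # hypothetical helper, equivalent to prior_transform_polychord above
    mprime, cprime = cube
    m = 0. + 10. * ndtri(mprime)        # Gaussian(mu=0, sigma=10) quantile
    c = -10. + (10. - (-10.)) * cprime  # Uniform(-10, 10) quantile
    return [m, c]

print(prior_transform_sketch([0.5, 0.5]))  # [0.0, 0.0], the prior medians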
def uniform_prior(params):
    beta = params[-1:].item()
    rational_beta = fractions.Fraction(beta)
    theta = params[:-1]
    idx = hash(tuple(theta)) % rational_beta.denominator
    prior = UniformPrior(mdl1.a, mdl1.b)(theta)
    return numpy.append(prior, beta)
def prior(hypercube):
    prior = []
    for h, pr in zip(hypercube, self.params.p_free_priors):
        if pr[1] == 'Gaussian':
            prior.append(
                GaussianPrior(float(pr[2][0]), float(pr[2][1]))(h))
        else:
            prior.append(
                UniformPrior(float(pr[2][0]), float(pr[2][2]))(h))
    return prior
def mixture_model_prior(params):
    beta = params[-1:].item()
    rational_beta = fractions.Fraction(beta)
    theta = params[:-1]
    idx = hash(tuple(theta)) % rational_beta.denominator
    if idx > rational_beta.numerator:
        prior = UniformPrior(mdl1.a, mdl1.b)(theta)
    else:
        prior = GaussianPrior(mdl1.mu[:-1], mdl1.sigma[:-1])(theta)
    return numpy.append(prior, beta)
def run_jaxns(num_live_points):
    try:
        from jaxns.nested_sampling import NestedSampler
        from jaxns.prior_transforms import PriorChain, UniformPrior
    except ImportError:
        raise ImportError("Install JaxNS!")
    from timeit import default_timer
    from jax import random, jit
    import jax.numpy as jnp

    def log_likelihood(theta, **kwargs):
        # isotropic Gaussian likelihood centred on the origin
        r2 = jnp.sum(theta ** 2)
        logL = -0.5 * jnp.log(2. * jnp.pi * sigma ** 2) * ndims
        logL += -0.5 * r2 / sigma ** 2
        return logL

    prior_transform = PriorChain().push(
        UniformPrior('theta', -jnp.ones(ndims), jnp.ones(ndims)))
    ns = NestedSampler(log_likelihood, prior_transform, sampler_name='slice')

    def run_with_n(n):
        @jit
        def run(key):
            return ns(key=key,
                      num_live_points=n,
                      max_samples=1e6,
                      collect_samples=False,
                      termination_frac=0.01,
                      stoachastic_uncertainty=False,
                      sampler_kwargs=dict(depth=3, num_slices=2))

        # warm-up call to trigger JIT compilation before timing
        results = run(random.PRNGKey(0))
        results.logZ.block_until_ready()

        t0 = default_timer()
        results = run(random.PRNGKey(1))
        print("Efficiency and logZ", results.efficiency, results.logZ)
        run_time = (default_timer() - t0)
        return run_time

    return run_with_n(num_live_points)
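A usage sketch for the benchmark above (sigma and ndims are assumed module-level globals that log_likelihood closes over; the number of live points is illustrative):

sigma = 0.1   # assumed noise scale used inside log_likelihood
ndims = 10    # assumed dimensionality of the unit-cube prior
print("JaxNS wall time (s):", run_jaxns(num_live_points=1000))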
def quantile(cube):
    return UniformPrior(-10, 10)(cube)
def prior(hypercube):
    ''' Uniform prior '''
    prior = []
    for i, lims in enumerate(limits):
        prior.append(UniformPrior(lims[0], lims[1])(hypercube[i]))
    return prior
def prior(hypercube): """ Uniform prior from [-1,1]^D. """ return UniformPrior(-1, 1)(hypercube)
def simple_prior(point_in_hypercube):
    return UniformPrior(-20, 20)(point_in_hypercube)
def quantile(self, hypercube):
    beta = hypercube[-1:].item()
    # PolyChord refers to quantile functions as priors.
    # This is not incorrect, but can be confusing.
    uniform = UniformPrior(self.a * beta, self.b * beta)(hypercube[:-1])
    return concatenate([uniform, [beta]])
def quantile(self, hypercube):
    return UniformPrior(self.a, self.b)(hypercube)
def prior(hypercube): """ Uniform prior """ prior = [] for i, limits in enumerate(self.limits.values()): prior.append(UniformPrior(limits[0], limits[1])(hypercube[i])) return prior
def prior(self, cube):
    return UniformPrior(-20, 20)(cube)