Example #1
    def compute_fit(self):
        self._polychord_output = None
        data = self._observed.spectrum
        datastd = self._observed.errorBar
        sqrtpi = np.sqrt(2 * np.pi)

        ndim = len(self.fitting_parameters)

        def polychord_loglike(cube):
            # log-likelihood function called by PolyChord
            fit_params_container = np.array(
                [cube[i] for i in range(len(self.fitting_parameters))])
            chi_t = self.chisq_trans(fit_params_container, data, datastd)
            loglike = -np.sum(np.log(datastd * sqrtpi)) - 0.5 * chi_t
            return loglike, [0.0]

        def polychord_uniform_prior(hypercube):
            # prior transform called by PolyChord: maps the unit hypercube
            # onto uniform priors over the fitting boundaries
            cube = [0.0] * ndim
            for idx, bounds in enumerate(self.fit_boundaries):
                bound_min, bound_max = bounds
                cube[idx] = (hypercube[idx] *
                             (bound_max - bound_min)) + bound_min
            return cube

        settings = PolyChordSettings(ndim, 1)
        settings.nlive = ndim * 25
        settings.num_repeats = ndim * 5
        settings.do_clustering = self.do_clustering
        settings.precision_criterion = self.evidence_tolerance
        settings.logzero = -1e70
        settings.read_resume = self.resume
        settings.base_dir = self.dir_polychord
        settings.file_root = '1-'
        self.warning('Number of dimensions {}'.format(ndim))
        self.warning('Fitting parameters {}'.format(self.fitting_parameters))

        self.info('Beginning fit...')
        pypolychord.run_polychord(polychord_loglike, ndim, 1, settings,
                                  polychord_uniform_prior)
        self._polychord_output = self.store_polychord_solutions()
        print(self._polychord_output)
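
The normalisation term -np.sum(np.log(datastd * sqrtpi)) is the constant part of a Gaussian log-likelihood, so together with -0.5 * chi_t the function returns ln L = -sum_i ln(sigma_i * sqrt(2*pi)) - chi^2/2. A minimal standalone sketch of that identity, assuming chisq_trans computes the usual chi-squared against a forward model (forward_model is a hypothetical stand-in, not from the source):

import numpy as np

def gaussian_loglike(params, data, datastd, forward_model):
    # forward_model is hypothetical: maps parameters to a model spectrum
    model = forward_model(params)
    chi_t = np.sum(((data - model) / datastd) ** 2)  # chi-squared
    # Gaussian normalisation constant plus the chi-squared term
    return -np.sum(np.log(datastd * np.sqrt(2 * np.pi))) - 0.5 * chi_t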
Example #2
    def polychord_sampler(self):
        import pypolychord
        from pypolychord.settings import PolyChordSettings
        from pypolychord.priors import UniformPrior, GaussianPrior

        ndim = len(self.params.p0)
        nder = 0

        # Log-likelihood compliant with PolyChord's input
        def likelihood(theta):
            return self.lnlike(theta), [0]

        def prior(hypercube):
            theta = []
            for h, pr in zip(hypercube, self.params.p_free_priors):
                if pr[1] == 'Gaussian':
                    # pr[2] holds [mean, sigma]
                    theta.append(
                        GaussianPrior(float(pr[2][0]), float(pr[2][1]))(h))
                else:
                    # pr[2] holds [lower, starting value, upper] (assumed)
                    theta.append(
                        UniformPrior(float(pr[2][0]), float(pr[2][2]))(h))
            return theta

        # Optional dumper function giving run-time read access to
        # the live points, dead points, weights and evidences
        def dumper(live, dead, logweights, logZ, logZerr):
            print("Last dead point:", dead[-1])

        settings = PolyChordSettings(ndim, nder)
        settings.base_dir = self.get_output(
            'param_chains')[:-4]  # Remove ".npz"
        settings.file_root = 'pch'
        settings.nlive = self.config['nlive']
        settings.num_repeats = self.config['nrepeat']
        settings.do_clustering = False  # Assume unimodal posterior
        settings.boost_posterior = 10  # Increase number of posterior samples
        settings.nprior = 200  # Draw nprior initial prior samples
        settings.maximise = True  # Maximize posterior at the end
        settings.read_resume = False  # Do not resume from an earlier run
        settings.feedback = 2  # Verbosity {0,1,2,3}

        output = pypolychord.run_polychord(likelihood, ndim, nder, settings,
                                           prior, dumper)

        return output
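
For reference, self.params.p_free_priors is assumed to hold (name, prior_type, spec) triples, with [mean, sigma] for Gaussian priors and [lower, start, upper] for uniform ones. A hypothetical example matching the branches above:

p_free_priors = [
    ('A_lens', 'Gaussian', [1.0, 0.1]),         # mean, sigma
    ('r_tensor', 'Uniform', [0.0, 0.01, 0.1]),  # lower, start, upper
]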
Example #3
    def run_polychord(num_live_points):
        import numpy as np
        from timeit import default_timer

        try:
            import pypolychord
            from pypolychord.settings import PolyChordSettings
            from pypolychord.priors import UniformPrior
        except ImportError:
            raise ImportError(
                "PolyChord not installed.\n"
                "Run `git clone https://github.com/PolyChord/PolyChordLite.git`,\n"
                "`cd PolyChordLite` and `python setup.py install`.")

        sigma = 0.1  # width of the toy Gaussian (assumed value)

        def likelihood(theta):
            """Simple spherical Gaussian likelihood."""
            nDims = len(theta)
            r2 = np.sum(theta**2)
            logL = -np.log(2 * np.pi * sigma * sigma) * nDims / 2.0
            logL += -r2 / 2 / sigma / sigma
            return logL, [r2]


        def prior(hypercube):
            """ Uniform prior from [-1,1]^D. """
            return UniformPrior(-1, 1)(hypercube)


        def dumper(live, dead, logweights, logZ, logZerr):
            # no-op dumper; PolyChord just needs a callable
            return

        ndims = 4  # dimensionality of the toy problem (assumed value)
        settings = PolyChordSettings(ndims, 1)
        settings.file_root = 'gaussian'
        settings.nlive = num_live_points
        settings.do_clustering = True
        settings.read_resume = False

        t0 = default_timer()
        output = pypolychord.run_polychord(likelihood, ndims, 1, settings, prior, dumper)
        run_time = default_timer() - t0
        print("polychord log(Z):", output.logZ)
        return run_time
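
A minimal usage sketch of the benchmark helper above (the live-point count is illustrative):

run_time = run_polychord(100)
print("wall time: {:.1f} s".format(run_time))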
Example #4
# In this module I'm going to investigate the effect of the so-called
# Lasenby parameter as well as the effects of proper prior
# repartitioning, and benchmark to see a speedup.

import numpy
import pypolychord
from pypolychord.settings import PolyChordSettings

# Vanilla

mu = numpy.array([1.0, 2.5])
cov = numpy.array([[1.0, 0.6], [0.6, 1.0]])
nDims = mu.size

settings = PolyChordSettings(nDims, 0)
settings.file_root = 'vanilla'
settings.nlive = 10**3
settings.read_resume = False
settings.do_clustering = True
settings.feedback = 0


invSig = numpy.linalg.inv(cov)
norm = numpy.linalg.slogdet(2 * numpy.pi * cov)[1] / 2


def gaussian_likelihood(theta):
    # correlated Gaussian log-likelihood with precomputed inverse covariance
    logL = -norm - (theta - mu) @ invSig @ (theta - mu) / 2
    return logL, []


def uniform_prior(point_in_hypercube):
    return pypolychord.priors.UniformPrior(-20, 20)(point_in_hypercube)
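
The vanilla snippet stops before sampling; a plausible completion under the settings above (standard pypolychord call, output attributes as documented):

output = pypolychord.run_polychord(gaussian_likelihood, nDims, 0,
                                   settings, uniform_prior)
print("vanilla log(Z) = {:.3f} +/- {:.3f}".format(output.logZ, output.logZerr))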


Example #5
def prior(hypercube):
    """Map the unit hypercube through each parameter's prior ppf.

    `priordict` (parameter name -> frozen scipy.stats distribution) and
    `parnames` are defined elsewhere in the surrounding script.
    """
    theta = [0.0] * nDims
    for i, x in enumerate(hypercube):
        theta[i] = priordict[parnames[i]].ppf(x)
    return theta
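
A hypothetical priordict/parnames pair for illustration, using scipy.stats frozen distributions whose ppf inverts the prior CDF:

import scipy.stats

parnames = ['period', 'amplitude']
priordict = {
    'period': scipy.stats.uniform(loc=1.0, scale=99.0),  # uniform on [1, 100]
    'amplitude': scipy.stats.norm(loc=0.0, scale=5.0),   # Gaussian prior
}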


# Define PolyChord settings
settings = PolyChordSettings(nDims, nDerived)
settings.do_clustering = not args_params.noclust  # assumed: --noclust disables clustering
settings.nlive = nDims * args_params.nlive
settings.base_dir = base_dir
settings.file_root = 'hd40307_k{}'.format(nplanets)
settings.num_repeats = nDims * args_params.nrep
settings.precision_criterion = args_params.prec
settings.read_resume = False

# Change settings if resume is true
if args_params.resume:
    settings.read_resume = args_params.resume
    settings.base_dir = dirname + prev_run

# Run PolyChord (PPC is presumably the pypolychord module, imported
# elsewhere in the script; logLikelihood is likewise defined there)
output = PPC.run_polychord(logLikelihood, nDims, nDerived, settings, prior)
Example #6
def run_polychord(loglikelihood, prior, dumper, nDims, nlive, root, ndump,
                  num_repeats):
    """Run PolyChord.

    See https://arxiv.org/abs/1506.00171 for more detail

    Parameters
    ----------
    loglikelihood: :obj:`callable`
        probability function taking a single parameter:

        - theta: numpy.array
                 physical parameters, `shape=(nDims,)`

        returning a log-likelihood (float)

    prior: :obj:`callable`
        transformation function taking a single parameter

        - cube: numpy.array
                hypercube parameters, `shape=(nDims,)`

        returning physical parameters (`numpy.array`)

    dumper: :obj:`callable`
        access function called roughly every ndump iterations, giving a
        window onto the current live and dead points. Called as
        `dumper(live_params, live_logls, dead_params, dead_logls)`,
        no return value.

    nDims: int
        Dimensionality of sampling space

    nlive: int
        Number of live points

    root: str
        base name for output files

    ndump: int
        How many iterations between dumper function calls

    num_repeats: int
        Length of chain to generate new live points

    """
    import os
    import numpy
    import pypolychord
    from pypolychord.settings import PolyChordSettings

    nDerived = 0
    settings = PolyChordSettings(nDims, nDerived)
    settings.base_dir = os.path.dirname(root)
    settings.file_root = os.path.basename(root)
    settings.nlive = nlive
    settings.num_repeats = num_repeats
    settings.do_clustering = True
    settings.read_resume = False
    settings.compression_factor = numpy.exp(-float(ndump)/nlive)
    settings.precision_criterion = 0.01

    def polychord_loglikelihood(theta):
        return loglikelihood(theta), []

    def polychord_dumper(live, dead, logweights, logZ, logZerr):
        # forward parameter columns and log-likelihoods from the live
        # and dead arrays to the user-supplied dumper
        dumper(live[:, :-2], live[:, -1], dead[:, :-2], dead[:, -1])

    pypolychord.run_polychord(polychord_loglikelihood, nDims, nDerived,
                              settings, prior, polychord_dumper)
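
A hedged usage sketch of this wrapper with a toy problem (all names below are illustrative, not from the source):

import numpy

def toy_loglike(theta):
    return -0.5 * numpy.sum(theta**2)  # unit Gaussian

def toy_prior(cube):
    return 2.0 * cube - 1.0  # uniform on [-1, 1]^nDims

def toy_dumper(live_params, live_logls, dead_params, dead_logls):
    print("best live logL:", live_logls.max())

run_polychord(toy_loglike, toy_prior, toy_dumper, nDims=4, nlive=100,
              root='chains/toy', ndump=100, num_repeats=20)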
Example #7
import numpy as np
import pypolychord
from pypolychord.settings import PolyChordSettings
from scipy.special import logsumexp


def pc(test_statistic,
       transform,
       n_dim,
       observed,
       n_live=100,
       base_dir="chains/",
       file_root="pc_",
       do_clustering=False,
       resume=False,
       ev_data=False,
       feedback=0,
       **kwargs):
    """
    Nested sampling with PolyChord.

    `pc_wrap`, `dumper` and `Result` are helpers defined in the
    surrounding module.
    """
    # copy keyword arguments to the settings object
    settings = PolyChordSettings(n_dim, 0, **kwargs)
    settings.nfail = n_live
    settings.precision_criterion = 0.
    settings.read_resume = resume
    settings.base_dir = base_dir
    settings.file_root = file_root
    settings.nlive = n_live
    settings.logLstop = observed
    settings.do_clustering = do_clustering
    settings.feedback = feedback

    loglike = pc_wrap(test_statistic)
    output = pypolychord.run_polychord(loglike, n_dim, 0, settings, transform,
                                       dumper(0, observed))

    # get number of calls directly
    calls = output.nlike

    # get log X from resume file

    label = "=== local volume -- log(<X_p>) ==="
    log_xp = None
    res_name = "{}/{}.resume".format(base_dir, file_root)

    with open(res_name) as res_file:
        for line in res_file:
            if line.strip() == label:
                next_line = res_file.readline()
                log_xp = np.array([float(e) for e in next_line.split()])
                break
        else:
            raise RuntimeError("didn't find {}".format(label))

    log_x = logsumexp(log_xp)
    n_iter = -log_x * n_live

    if not ev_data:
        return Result.from_ns(n_iter, n_live, calls)

    # get ev data
    ev_name = "{}/{}_dead.txt".format(base_dir, file_root)
    ev_data = np.genfromtxt(ev_name)
    test_statistic = ev_data[:, 0]
    log_x = -np.arange(0, len(test_statistic), 1.) / n_live
    log_x_delta = np.sqrt(-log_x / n_live)

    return Result.from_ns(n_iter, n_live,
                          calls), [test_statistic, log_x, log_x_delta]
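
pc_wrap is a helper from the surrounding module; a plausible reconstruction (hypothetical, not from the source) is a closure adapting a test statistic to PolyChord's (logL, derived) return convention:

def pc_wrap(test_statistic):
    def loglike(theta):
        return test_statistic(theta), []  # no derived parameters
    return loglike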
Example #8
def run_polychord(loglikelihood, prior, dumper, nDims, nlive, root,
                  num_repeats):
    """ Wrapper function to run PolyChord

    See https://arxiv.org/abs/1506.00171 for more detail

    Parameters
    ----------
    loglikelihood: callable
        probability function taking a single parameter:

        - theta: numpy.array
                 physical parameters, `shape=(nDims,)`

        returning a log-likelihood (float)

    prior: callable
        transformation function taking a single parameter

        - cube: numpy.array
                hypercube parameters, `shape=(nDims,)`

        returning physical parameters (`numpy.array`)

    dumper: callable
        access function called every nlive iterations giving a window onto
        the current live points. Single parameter, no return:

        - live: numpy.array
               live parameters, `shape=(nlive, nDims)`

    nDims: int
        Dimensionality of sampling space

    nlive: int
        Number of live points

    root: str
        base name for output files

    num_repeats: int
        Length of chain to generate new live points
    """
    import os
    import pypolychord
    from pypolychord.settings import PolyChordSettings

    basedir = os.path.dirname(root)
    cluster_dir = os.path.join(basedir, 'clusters')
    os.makedirs(cluster_dir, exist_ok=True)

    nDerived = 0
    settings = PolyChordSettings(nDims, nDerived)
    settings.base_dir = basedir
    settings.file_root = os.path.basename(root)
    settings.nlive = nlive
    settings.num_repeats = num_repeats
    settings.do_clustering = True
    settings.read_resume = False

    def polychord_loglikelihood(theta):
        return loglikelihood(theta), []

    def polychord_dumper(live, dead, logweights, logZ, logZerr):
        # forward only the live parameter columns (the final logL column is dropped)
        dumper(live[:, :-1])

    pypolychord.run_polychord(polychord_loglikelihood, nDims, nDerived,
                              settings, prior, polychord_dumper)
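
This wrapper differs from Example #6 mainly in its dumper contract; a minimal dumper compatible with it (illustrative):

def mean_tracker(live_params):
    # live_params has shape (nlive, nDims): parameters only, logL stripped
    print("posterior bulk location:", live_params.mean(axis=0))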