Example #1
    def compute_fit(self):
        self._polychord_output = None
        data = self._observed.spectrum
        datastd = self._observed.errorBar
        sqrt2pi = np.sqrt(2 * np.pi)

        ndim = len(self.fitting_parameters)

        def polychord_loglike(cube):
            # log-likelihood function called by polychord
            fit_params_container = np.array(
                [cube[i] for i in range(len(self.fitting_parameters))])
            chi_t = self.chisq_trans(fit_params_container, data, datastd)

            # Gaussian log-likelihood: -sum(log(sigma*sqrt(2*pi))) - chi^2 / 2
            loglike = -np.sum(np.log(datastd * sqrt2pi)) - 0.5 * chi_t
            return loglike, [0.0]

        def polychord_uniform_prior(hypercube):
            # Prior transform called by PolyChord: map each unit-hypercube
            # coordinate onto a uniform prior over its fitting boundaries.
            cube = [0.0] * ndim

            for idx, bounds in enumerate(self.fit_boundaries):
                bound_min, bound_max = bounds
                cube[idx] = (hypercube[idx] *
                             (bound_max - bound_min)) + bound_min
            return cube

        settings = PolyChordSettings(ndim, 1)
        settings.nlive = ndim * 25
        settings.num_repeats = ndim * 5
        settings.do_clustering = self.do_clustering
        settings.precision_criterion = self.evidence_tolerance
        settings.logzero = -1e70
        settings.read_resume = self.resume
        settings.base_dir = self.dir_polychord
        settings.file_root = '1-'
        self.warning('Number of dimensions {}'.format(ndim))
        self.warning('Fitting parameters {}'.format(self.fitting_parameters))

        self.info('Beginning fit......')
        pypolychord.run_polychord(polychord_loglike, ndim, 1, settings,
                                  polychord_uniform_prior)
        self._polychord_output = self.store_polychord_solutions()
        print(self._polychord_output)
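The helper chisq_trans is defined elsewhere in this class; a minimal sketch of what it presumably computes, assuming a hypothetical forward-model call self.model(fit_params) that returns a spectrum on the observed grid:

import numpy as np

def chisq_trans(self, fit_params, data, datastd):
    # Hypothetical sketch: evaluate the forward model at the proposed
    # parameters and return the chi-square against the observed spectrum.
    model = self.model(fit_params)  # assumed forward-model interface
    return np.sum(((data - model) / datastd) ** 2)

With this chi-square, the log-likelihood above is the standard Gaussian form loglike = -sum(log(sigma * sqrt(2*pi))) - chi^2 / 2.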
Example #2
    def run(self):
        """
        Executes the inference
        """
        if self.prepare():
            # Setup the inference
            ndim = np.sum(self.pstep > 0)
            settings = PolyChordSettings(ndim, 0)
            settings.base_dir = self.outputdir
            settings.file_root = self.fprefix
            settings.nlive = self.nlive
            settings.read_resume = self.resume
            if self.nrepeat is not None:
                settings.num_repeats = self.nrepeat
            settings.precision_criterion = self.dlogz
            settings.grade_dims = [int(ndim)]
            settings.feedback = self.verb
            # Run it
            if self.dumper is not None:
                out = pypolychord.run_polychord(self.loglike, ndim, 0,
                                                settings, self.prior,
                                                self.dumper)
            else:
                out = pypolychord.run_polychord(self.loglike, ndim, 0,
                                                settings, self.prior)

            # <root>_equal_weights.txt columns: weight, -2*logL, parameters
            outp = np.loadtxt(os.path.join(self.outputdir, self.fprefix) +
                              '_equal_weights.txt')
            self.outp = outp[:, 2:].T
            # Best fit = sample with the smallest -2*logL (maximum likelihood)
            ibest = np.argmin(outp[:, 1])
            self.bestp = self.outp[:, ibest]
            # Save posterior and bestfit params
            if self.fsavefile is not None:
                np.save(self.fsavefile, self.outp)
            if self.fbestp is not None:
                np.save(self.fbestp, self.bestp)
            return self.outp, self.bestp
        else:
            if self.verb:
                print("Sampler is not fully prepared to run. " + \
                      "Correct the above errors and try again.")
Example #3
def prior(hypercube):
    # Inverse-CDF (ppf) transform from the unit hypercube to physical priors
    theta = np.zeros_like(hypercube)
    for i, x in enumerate(hypercube):
        theta[i] = priordict[parnames[i]].ppf(x)

    return theta


# Define PolyChord settings
settings = PolyChordSettings(
    nDims,
    nDerived,
)
settings.do_clustering = args_params.noclust
settings.nlive = nDims * args_params.nlive
settings.base_dir = base_dir
settings.file_root = 'hd40307_k{}'.format(nplanets)
settings.num_repeats = nDims * args_params.nrep
settings.precision_criterion = args_params.prec
settings.read_resume = False

# Change settings if resume is true
if args_params.resume:
    settings.read_resume = args_params.resume
    settings.base_dir = dirname + prev_run

# Run PolyChord
output = PPC.run_polychord(logLikelihood, nDims, nDerived, settings, prior)

# Parameter names
# latexnames = [r'\sigma_J', r'C']
# for j in range(nplanets):
#     latexnames.extend(
#         [fr'K_{j}', fr'P_{j}', fr'e_{j}', fr'\omega_{j}', fr'M_{j}'])
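PPC.run_polychord returns a PolyChordOutput object; a short sketch using its documented evidence attributes and the paramnames helper that the commented-out LaTeX names above would feed (the two-entry list here is illustrative only):

# Log-evidence and its uncertainty; comparing logZ across nplanets gives
# Bayes factors for selecting the number of planets.
print(output.logZ, output.logZerr)

# Attach (name, latex) pairs for downstream tools such as getdist.
paramnames = [('K0', r'K_0'), ('P0', r'P_0')]  # hypothetical subset
output.make_paramnames_files(paramnames)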
Example #4
def run_polychord(loglikelihood, prior, dumper, nDims, nlive, root, ndump,
                  num_repeats):
    """Run PolyChord.

    See https://arxiv.org/abs/1506.00171 for more detail

    Parameters
    ----------
    loglikelihood: :obj:`callable`
        probability function taking a single parameter:

        - theta: numpy.array
                 physical parameters, `shape=(nDims,)`

        returning a log-likelihood (float)

    prior: :obj:`callable`
        transformation function taking a single parameter

        - cube: numpy.array
                hypercube parameters, `shape=(nDims,)`

        returning physical parameters (`numpy.array`)

    dumper: :obj:`callable`
        access function called every ndump iterations, giving a window onto
        the current live and dead points. Called with four parameters, no
        return:

        - live_params: `numpy.array` of live physical parameters
        - live_logls: `numpy.array` of live log-likelihoods
        - dead_params: `numpy.array` of dead physical parameters
        - dead_logls: `numpy.array` of dead log-likelihoods

    nDims: int
        Dimensionality of sampling space

    nlive: int
        Number of live points

    root: str
        base name for output files

    ndump: int
        How many iterations between dumper function calls

    num_repeats: int
        Length of chain to generate new live points

    """
    import os

    import numpy
    import pypolychord
    from pypolychord.settings import PolyChordSettings

    nDerived = 0
    settings = PolyChordSettings(nDims, nDerived)
    settings.base_dir = os.path.dirname(root)
    settings.file_root = os.path.basename(root)
    settings.nlive = nlive
    settings.num_repeats = num_repeats
    settings.do_clustering = True
    settings.read_resume = False
    settings.compression_factor = numpy.exp(-float(ndump)/nlive)
    settings.precision_criterion = 0.01

    def polychord_loglikelihood(theta):
        return loglikelihood(theta), []

    def polychord_dumper(live, dead, logweights, logZ, logZerr):
        # Pass the parameter columns and the final log-likelihood column
        # of the live and dead points to the user-supplied dumper
        dumper(live[:, :-2], live[:, -1], dead[:, :-2], dead[:, -1])

    pypolychord.run_polychord(polychord_loglikelihood, nDims, nDerived,
                              settings, prior, polychord_dumper)
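A hypothetical end-to-end call of this wrapper on a unit Gaussian target; the likelihood, prior, and dumper below are illustrative stand-ins, not part of the original module:

import numpy

def gaussian_loglikelihood(theta):
    # Unnormalised unit Gaussian in nDims dimensions
    return -0.5 * numpy.sum(theta ** 2)

def uniform_prior(cube):
    # Map the unit hypercube onto [-5, 5]^nDims
    return 10.0 * cube - 5.0

def quiet_dumper(live_params, live_logls, dead_params, dead_logls):
    pass  # ignore intermediate dumps

run_polychord(gaussian_loglikelihood, uniform_prior, quiet_dumper,
              nDims=4, nlive=100, root="chains/gauss", ndump=100,
              num_repeats=20)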
Example #5
import numpy as np
import pypolychord
from pypolychord.settings import PolyChordSettings
from scipy.special import logsumexp


def pc(test_statistic,
       transform,
       n_dim,
       observed,
       n_live=100,
       base_dir="chains/",
       file_root="pc_",
       do_clustering=False,
       resume=False,
       ev_data=False,
       feedback=0,
       **kwargs):
    """
    Nested sampling with PC
    """
    # copy key word arguments to settings object
    settings = PolyChordSettings(n_dim, 0, **kwargs)
    settings.nfail = n_live
    settings.precision_criterion = 0.
    settings.read_resume = resume
    settings.base_dir = base_dir
    settings.file_root = file_root
    settings.nlive = n_live
    settings.logLstop = observed
    settings.do_clustering = do_clustering
    settings.feedback = feedback

    loglike = pc_wrap(test_statistic)
    output = pypolychord.run_polychord(loglike, n_dim, 0, settings, transform,
                                       dumper(0, observed))

    # get number of calls directly
    calls = output.nlike

    # get log X from resume file

    label = "=== local volume -- log(<X_p>) ==="
    log_xp = None
    res_name = "{}/{}.resume".format(base_dir, file_root)

    with open(res_name) as res_file:
        for line in res_file:
            if line.strip() == label:
                next_line = res_file.readline()
                log_xp = np.array([float(e) for e in next_line.split()])
                break
        else:
            raise RuntimeError("didn't find {}".format(label))

    # Number of NS iterations implied by the remaining prior volume:
    # X = exp(-n_iter / n_live)  =>  n_iter = -log(X) * n_live
    log_x = logsumexp(log_xp)
    n_iter = -log_x * n_live

    if not ev_data:
        return Result.from_ns(n_iter, n_live, calls)

    # get ev data
    ev_name = "{}/{}_dead.txt".format(base_dir, file_root)
    ev_data = np.genfromtxt(ev_name)
    test_statistic = ev_data[:, 0]
    # Standard NS volume estimate log X_i = -i / n_live, with its
    # 1-sigma uncertainty sqrt(i) / n_live
    log_x = -np.arange(0, len(test_statistic), 1.) / n_live
    log_x_delta = np.sqrt(-log_x / n_live)

    return Result.from_ns(n_iter, n_live,
                          calls), [test_statistic, log_x, log_x_delta]
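pc_wrap, dumper, and Result are module-local helpers not shown in this excerpt; hypothetical sketches of the first two, consistent with how they are used above:

def pc_wrap(test_statistic):
    # Adapt a plain test statistic to PolyChord's (loglike, derived) convention
    def loglike(params):
        return test_statistic(params), []
    return loglike

def dumper(interval, threshold):
    # Hypothetical factory returning a PolyChord dumper callback; PolyChord
    # invokes it as dumper(live, dead, logweights, logZ, logZerr)
    def _dumper(live, dead, logweights, logZ, logZerr):
        pass  # e.g. report progress of live points toward 'threshold'
    return _dumper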