Code example #1
# Imports for the Cobaya API used below (assumed for this snippet);
# NoisyCovLike (a Gaussian test likelihood) and valid_methods (the minimizer
# names to exercise) are defined in the surrounding test module.
import os

import numpy as np

from cobaya import mpi
from cobaya.run import run
from cobaya.typing import InputDict


def test_minimize_gaussian(tmpdir):
    maxloglik = 0
    for method in reversed(valid_methods):
        NoisyCovLike.noise = 0.005 if method == 'bobyqa' else 0
        info: InputDict = {
            'likelihood': {
                'like': NoisyCovLike
            },
            "sampler": {
                "minimize": {
                    "ignore_prior": True,
                    "method": method
                }
            }
        }
        products = run(info).sampler.products()
        error = abs(maxloglik - -products["minimum"]["minuslogpost"])
        assert error < 0.01

        # Re-run with file output and check the GetDist-format .bestfit file.
        info['output'] = os.path.join(tmpdir, 'testmin')
        products = run(info, force=True).sampler.products()
        if mpi.is_main_process():
            from getdist.types import BestFit
            res = BestFit(info['output'] + '.bestfit').getParamDict()
            assert np.isclose(res["loglike"],
                              products["minimum"]["minuslogpost"])
            for p, v in list(res.items())[:-2]:
                assert np.isclose(products["minimum"][p], v)
Code example #2
File: planckStyle.py    Project: qmxp55/CosmoMC
def bestfit(self, root, want_fixed=True):
    # Search each known chain directory for the given root and return the
    # corresponding BestFit built from its '.minimum' file, or None if not found.
    for chain_dir in self.sampleAnalyser.chain_dirs:
        jobItem = chain_dir.resolveRoot(root)
        if jobItem:
            return BestFit(jobItem.chainRoot + '.minimum',
                           want_fixed=want_fixed)
    return None
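The helper above just locates the '.minimum' file and hands it to the BestFit constructor. Below is a minimal usage sketch of the returned object; the path and parameter name are illustrative, and the constructor arguments and accessors mirror the calls that appear in the other examples on this page:

from getdist.types import BestFit

# Load a best-fit file written by CosmoMC/Cobaya (path is illustrative).
fit = BestFit('chains/base_plikHM_TT.minimum', want_fixed=True)

# Dictionary mapping parameter names to their best-fit values.
params = fit.getParamDict()
print(params.get('H0'))  # parameter name chosen for illustration

# Per-likelihood chi-squared entries, as iterated in examples #5 and #6 below.
for entry in fit.chiSquareds:
    print(entry[1].name, entry[1].chisq)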
Code example #3
def get_maximum_likelihood(dummy, max_posterior, chain_min_root,
                           param_name_dict, param_label_dict):
    """
    Import the maximum likelihood file for a Cosmosis run, if present.

    :param dummy: dummy argument for interfacing, not used in practice
    :param max_posterior: not used in the body shown here
    :param chain_min_root: name of the minimum file or the folder that contains
        it.
    :param param_name_dict: a dictionary with the mapping between
        cosmosis names and reasonable names.
    :param param_label_dict: dictionary with the mapping between the parameter
        names and the labels.
    :return: :class:`~getdist.types.BestFit` the best fit object.
    """
    # decide if the user passed a folder or a chain:
    if os.path.isfile(chain_min_root + '.txt'):
        minimum_file = chain_min_root + '.txt'
    elif os.path.isdir(chain_min_root):
        # look for the chain file:
        temp = list(
            filter(lambda x: 'chain_pmaxlike.txt' in x,
                   os.listdir(chain_min_root)))
        if len(temp) == 0:
            raise ValueError('No minimum file found in folder', chain_min_root)
        minimum_file = chain_min_root + '/' + temp[0]
    else:
        raise ValueError('Input chain root is neither a folder nor a file.')
    # get the info:
    info = get_cosmosis_info(minimum_file)
    # check the sampler:
    sampler = get_sampler_type(info)
    if sampler != 'max_like':
        raise ValueError('Minimum file appears to have a strange sampler:',
                         sampler)
    # get the parameter names:
    param_names = get_param_names(info)
    # get the parameter labels from the user provided dictionary:
    param_labels = get_param_labels(info, param_names, param_label_dict)
    # best fit:
    best_fit = BestFit()
    # set parameter names:
    if param_name_dict is not None:
        best_fit.names = []
        for name in param_names:
            if name in param_name_dict.keys():
                best_fit.names.append(ParamInfo(param_name_dict[name]))
            else:
                best_fit.names.append(ParamInfo(name))
    else:
        best_fit.names = [ParamInfo(name) for name in param_names]
    if param_labels is not None:
        for name, label in zip(best_fit.names, param_labels):
            name.label = label
    # get the numbers:
    best_fit_params = loadNumpyTxt(minimum_file, skiprows=0)[0]
    # look for weight:
    if 'weight' in param_names:
        best_fit.weight = best_fit_params[param_names.index('weight')]
    else:
        best_fit.weight = 1.0
    # look for log like:
    if 'post' in param_names:
        best_fit.logLike = -best_fit_params[param_names.index('post')]
    else:
        raise ValueError('Best fit file does not contain the best-fit log-like')
    # correct log likes in getdist format:
    if 'post' in param_names:
        best_fit_params[param_names.index('post')] = \
            -best_fit_params[param_names.index('post')]
    if 'prior' in param_names:
        best_fit_params[param_names.index('prior')] = \
            -best_fit_params[param_names.index('prior')]
    if 'like' in param_names:
        best_fit_params[param_names.index('like')] = \
            -best_fit_params[param_names.index('like')]
    # get the data chi squareds:
    best_fit.chiSquareds = []
    # get the ranges to get the running parameters:
    ranges = get_ranges(info, param_names)
    # now initialize all parameters:
    for ind, param in enumerate(best_fit.names):
        param.isDerived = param.name not in ranges.keys()
        param.number = ind
        param.best_fit = best_fit_params[ind]
    #
    return best_fit
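For reference, a stripped-down sketch of the manual construction performed above, with placeholder parameter names and values; the attributes set (names, weight, logLike, and the per-parameter number, best_fit, isDerived) are the ones assigned in this example, and ParamInfo is assumed to come from getdist.paramnames:

from getdist.types import BestFit
from getdist.paramnames import ParamInfo

bf = BestFit()
bf.names = [ParamInfo('omegam'), ParamInfo('sigma8')]  # placeholder parameters
bf.weight = 1.0       # chain weight of the best-fit point
bf.logLike = 1234.5   # -log(like); placeholder value
for ind, (param, value) in enumerate(zip(bf.names, [0.31, 0.81])):
    param.number = ind
    param.best_fit = value   # placeholder best-fit values
    param.isDerived = False  # both treated as sampled parameters here
print([(p.name, p.best_fit) for p in bf.names])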
Code example #4

def getdic(paramdic):
    paramdic['tau'] = 0.055
    paramdic['halofit_version'] = 'mead'
    return paramdic


if 'lens' in plots:
    DESlens = DES_like(DESdataset,
                       dataset_params={'used_data_types': 'xip xim'})

    samples = g.sampleAnalyser.samplesForRoot('base_DESlens_DESpriors')
    planck_fit_lens = getdic(
        BestFit(
            r'C:\Tmp\Planck\2017\DES-Planck_bf\base_DESlens_DESpriors_planckbf.minimum',
            want_fixed=True).getParamDict())
    lens_theory = DESlens.get_theory_for_params(planck_fit_lens)

    planck_fit_lens['DES_AIA'] = 0
    lens_theory_noIA = DESlens.get_theory_for_params(planck_fit_lens)
    cache_hash = hashlib.md5(
        ('%s%s%s' % (fast_test, samples.jobItem.chainRoot,
                     os.path.getmtime(samples.jobItem.chainRoot + '_1.txt'))
         ).encode('ascii')).hexdigest()
    cachename = os.path.join(tempfile.gettempdir(),
                             'planck2018' + cache_hash + '.DESlens_samps')
    if os.path.isfile(cachename):
        print('reading cache %s' % cachename)
        with open(cachename, 'rb') as inp:
            theory_samps = pickle.load(inp)
Code example #5
File: CMB_plik.py    Project: EFTCAMB/EFTCosmoMC
        # (tail end of the preceding method in the original file)
        else:
            return TT


if __name__ == "__main__":
    import time

    import numpy as np
    from getdist.types import BestFit

    tag = 'TTTEEE'
    plik = plik_likelihood(
        '/home/aml1005/git/2017/cosmomcplanck/data/clik_14.0/hi_l/plik/plik_rd12_HM_v22b_%s.clik'
        % tag)
    root = '/scratch/aml1005/Dec17/base/plikHM_%s_lowl_lowE/base_plikHM_%s_lowl_lowE' % (
        tag, tag)

    fit = BestFit('%s.minimum' % root, want_fixed=True)
    cls = ClsArray('%s.minimum.theory_cl' % root)
    params = fit.getParamDict()
    start = time.time()
    chi2_clik = plik.chi_squared(cls, params)
    print('Likelihood execution time:', time.time() - start)
    for v in fit.chiSquareds:
        if 'plik' in v[1].name:
            test_chi2 = v[1].chisq
            print('Chi-squared calculated: %s, best-fit %s' %
                  (chi2_clik, test_chi2))
            assert (np.isclose(chi2_clik, test_chi2, 0.01))
            break
    if False:  # super slow with unbinned
        start = time.time()
        chi2_python = plik.chi_squared(cls, params, False)
Code example #6
File: CMB_CamSpec.py    Project: EFTCAMB/EFTCosmoMC
    import os
    import sys
    import time
    from getdist.types import BestFit

    # Python 2-style unbuffered stdout; Python 3 does not allow unbuffered
    # text streams, so this line would need adjusting there.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    dropbox = r'C:\Users\Antony Lewis\Dropbox\Planck\2017-Dec'
    dataset = r"C:\Work\Dist\git\cosmomcplanck\data\planck_internal\CamSpecHM_10_7.dataset"
    like = CamSpec_likelihood(dataset)
    likeTT = CamSpec_likelihood(dataset,
                                {'use_cl': '100x100 143x143 217x217 143x217'})

    for base in ['base', 'base_Alens']:
        for camspec, data in zip(
            [likeTT, like],
            ['CamSpecHM_TT_lowl_lowE', 'CamSpecHM_TTTEEE_lowl_lowE']):
            root = base + '_' + data
            fit = BestFit(os.path.join(dropbox, 'Bestfits/%s.minimum' % root),
                          want_fixed=True)
            cls = ClsArray(
                os.path.join(dropbox, 'Bestfits/%s.minimum.theory_cl' % root))
            start = time.time()
            chi2 = camspec.chi_squared(cls, fit.getParamDict())
            print('Likelihood execution time:', time.time() - start)
            print(root)
            for v in fit.chiSquareds:
                if 'CamSpec' in v[1].name:
                    test_chi2 = v[1].chisq
                    print('Chi-squared calculated: %s, best-fit %s' %
                          (chi2, test_chi2))
                    break