Example #1
    def testGetDist(self):
        from getdist.command_line import getdist_command

        os.chdir(self.tempdir)
        res = getdist_command([self.root])
        # Note this can fail if your local analysis defaults change the default ignore_rows
        self.assertTrue('-Ln(mean like)  = 2.31' in res, res)
        fname = 'testchain_pars.ini'
        getdist_command(['--make_param_file', fname])
        ini = IniFile(fname)
        ini.params['no_plots'] = False
        ini.params['plot_2D_num'] = 1
        ini.params['plot1'] = 'x y'
        ini.params['num_3D_plots'] = 1
        ini.params['3D_plot1'] = 'x y x'
        ini.params['triangle_params'] = '*[xy]*'

        ini.saveFile(fname)
        res = getdist_command([fname, self.root])
        self.assertTrue('-Ln(mean like)  = 2.31' in res)

        def check_run():
            for f in ['.py', '_2D.py', '_3D.py', '_tri.py']:
                pyname = self.root + f
                self.assertTrue(os.path.isfile(pyname))
                subprocess.check_output(['python', pyname])
                pdf = self.root + f.replace('py', 'pdf')
                self.assertTrue(os.path.isfile(pdf))
                os.remove(pdf)
                os.remove(pyname)

        check_run()
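The same entry point can be driven outside the test harness. A minimal sketch, assuming a chain root 'chains/test' with the usual .txt and .paramnames files alongside it:

from getdist.command_line import getdist_command

# analyse the chain with default settings; the summary text is returned as a string
output = getdist_command(['chains/test'])
print(output)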
Example #2
    def getLikelihood(self, paramtag, datatag, want_unbinned=False):
        # get python likelihood, assuming TT or TTTEEE likelihood, plik or Camspec
        jobItem = self.getJobItem(paramtag, datatag)
        ini = IniFile(jobItem.chainRoot + '.inputparams')
        plik_file = ini.string('clik_data_plik', '')
        if plik_file:
            params = ini.string('clik_params_plik', '')
            if not os.path.exists(plik_file):
                plik_file = os.path.join(os.path.dirname(__file__), r'../',
                                         plik_file.replace('%DATASETDIR%', 'data/'))
                params = os.path.join(os.path.dirname(__file__), r'../',
                                      params.replace('%DATASETDIR%', 'data/'))
                if not os.path.exists(plik_file):
                    raise Exception('plik file not found: %s' % plik_file)

            from planck.CMB_plik import plik_likelihood
            like = plik_likelihood(plik_file, params)
            if want_unbinned:
                return like, plik_likelihood(plik_file.replace('.clik', '_bin1.clik'), params)
            else:
                return like
        else:
            from planck.CMB_CamSpec import CamSpec_likelihood

            fname = os.path.join(os.path.dirname(__file__), r'../data/planck_internal/CamSpecHM_10_7.dataset')
            if '_TT_' in jobItem.chainRoot:
                like = CamSpec_likelihood(fname, {'use_cl': '100x100 143x143 217x217 143x217'})
            else:
                like = CamSpec_likelihood(fname)

            if want_unbinned:
                return like, like
            else:
                return like
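A hedged usage sketch for the method above (the grid object and the paramtag/datatag values are hypothetical placeholders for whatever batch grid is loaded):

# binned plik likelihood plus its unbinned counterpart for a TT chain
like, unbinned = grid.getLikelihood('base', 'plikHM_TT_lowl_lowE', want_unbinned=True)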
Example #3
 def propertiesIni(self):
     if os.path.exists(self.propertiesIniFile()):
         return IniFile(self.propertiesIniFile())
     else:
         ini = IniFile()
         ini.original_filename = self.propertiesIniFile()
         return ini
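Callers thus get a usable IniFile whether or not the file exists on disk yet. A minimal round-trip sketch (the owning object and settings key are hypothetical; original_filename is set by IniFile on load as well as in the fallback branch above):

ini = item.propertiesIni()                # empty IniFile if nothing on disk yet
ini.params['lastUpdated'] = '2019-01-01'  # hypothetical key
ini.saveFile(ini.original_filename)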
Example #4
 def load_dataset_file(self, filename, dataset_params):
     if '.dataset' not in filename:
         filename += '.dataset'
     ini = IniFile(filename)
     self.dataset_filename = filename
     ini.params.update(self.default_dataset_params)
     ini.params.update(dataset_params or {})
     self.init_params(ini)
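A usage sketch (the instance, file root and override key are hypothetical); the '.dataset' extension is appended automatically and entries in dataset_params override values read from the file:

like = MyLikelihood()  # hypothetical class defining load_dataset_file
like.load_dataset_file('data/my_measurement', {'use_cl': 'TT TE EE'})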
Example #5
    def getLikelihood(self, paramtag, datatag, want_unbinned=False):
        # get python likelihood, assuming TT or TTTEEE likelihood, plik or Camspec
        jobItem = self.getJobItem(paramtag, datatag)
        ini = IniFile(jobItem.chainRoot + '.inputparams')
        plik_file = ini.string('clik_data_plik', '')
        if plik_file:
            params = ini.string('clik_params_plik', '')
            if not os.path.exists(plik_file):
                plik_file = os.path.join(
                    os.path.dirname(__file__), r'../',
                    plik_file.replace('%DATASETDIR%', 'data/'))
                params = os.path.join(os.path.dirname(__file__), r'../',
                                      params.replace('%DATASETDIR%', 'data/'))
                if not os.path.exists(plik_file):
                    raise Exception('plik file not found: %s' % plik_file)

            from planck.CMB_plik import plik_likelihood
            like = plik_likelihood(plik_file, params)
            if want_unbinned:
                return like, plik_likelihood(
                    plik_file.replace('.clik', '_bin1.clik'), params)
            else:
                return like
        else:
            from planck.CMB_CamSpec import CamSpec_likelihood

            fname = os.path.join(
                os.path.dirname(__file__),
                r'../data/planck_internal/CamSpecHM_10_7.dataset')
            if '_TT_' in jobItem.chainRoot:
                like = CamSpec_likelihood(
                    fname, {'use_cl': '100x100 143x143 217x217 143x217'})
            else:
                like = CamSpec_likelihood(fname)

            if want_unbinned:
                return like, like
            else:
                return like
Example #6
 def loadDataset(self, froot, dataset_params):
     if '.dataset' not in froot: froot += '.dataset'
     ini = IniFile(froot)
     ini.params.update(dataset_params)
     self.readIni(ini)
Example #7
elif args.plot_data is not None:
    data_dir = os.path.abspath(args.plot_data) + os.sep
else:
    data_dir = None

if data_dir: checkDir(data_dir)

ini_dir = batch.batchPath + 'getdist' + os.sep

checkDir(ini_dir)

if args.delay: time.sleep(args.delay)
processes = set()

for jobItem in Opts.filteredBatchItems():
    ini = IniFile()
    ini.params['file_root'] = jobItem.chainRoot
    ini.params['batch_path'] = jobItem.batchPath
    checkDir(jobItem.distPath)
    ini.params['out_dir'] = jobItem.distPath
    if data_dir: ini.params['plot_data_dir'] = data_dir
    custom_plot = batch.commonPath + 'plots' + os.sep + jobItem.paramtag + '.ini'
    custom_plot2 = batch.commonPath + 'plots' + os.sep + jobItem.name + '.ini'
    if os.path.exists(custom_plot2):
        ini.includes.append(custom_plot2)
    elif os.path.exists(custom_plot):
        ini.includes.append(custom_plot)
    if os.path.exists(args.base_ini):
        ini.defaults.append(args.base_ini)
    elif os.path.exists(batch.commonPath + args.base_ini):
        ini.defaults.append(batch.commonPath + args.base_ini)
Example #8
def make_forecast_cmb_dataset(input_cl_file, output_root, output_dir=None, noise_muK_arcmin_T=None,
                              noise_muK_arcmin_P=None, NoiseVar=None, ENoiseFac=2, fwhm_arcmin=None,
                              lmin=2, lmax=None, fsky=1, fields_use=None,
                              lens_recon_noise=None, cl_data_cols=''):
    """
    Make a simulated .dataset and associated files with 'data' set at the input fiducial model.

    :param input_cl_file: input fiducial CL
    :param output_root: root name for output files, e.g. 'my_sim1'
    :param output_dir: output directory
    :param noise_muK_arcmin_T: temperature noise in muK-arcmin
    :param noise_muK_arcmin_P: polarization noise in muK-arcmin
    :param NoiseVar: effective isotropic noise variance for the temperature (N_L=NoiseVar with no beam)
    :param ENoiseFac: factor by which polarization noise variance is higher (usually 2; for Planck
                        about 4, as only half the detectors are polarized)
    :param fwhm_arcmin: beam fwhm in arcminutes
    :param lmin: l_min
    :param lmax: l_max
    :param fsky: sky fraction
    :param fields_use: optional list of fields to restrict to (e.g. 'T E')
    :param lens_recon_noise: optional array, starting at L=0, for the PP lensing reconstruction noise, in [L(L+1)]^2C_L^phi/2pi units
    :param cl_data_cols: if not specified in file header, order of columns in input CL file (e.g. 'TT TE EE BB PP')
    :return:
    """

    use_lensing = lens_recon_noise
    use_CMB = noise_muK_arcmin_T or NoiseVar is not None

    ini = IniFile()
    dataset = ini.params

    if not cl_data_cols:
        cl_data_cols = lastTopComment(input_cl_file)
        if not cl_data_cols:
            raise Exception('input CL file must specify the names of columns (TT TE EE..)')
    else:
        dataset['cl_hat_order'] = cl_data_cols

    if use_CMB:
        if NoiseVar is None:
            if noise_muK_arcmin_T is None:
                raise ValueError('Must specify noise')
            NoiseVar = (noise_muK_arcmin_T * np.pi / 180 / 60.) ** 2
            if noise_muK_arcmin_P is not None:
                ENoiseFac = (noise_muK_arcmin_P / noise_muK_arcmin_T) ** 2
        elif noise_muK_arcmin_T is not None or noise_muK_arcmin_P is not None:
            raise ValueError('Specify either noise_muK_arcmin or NoiseVar')
        if not fields_use:
            fields_use = ''
            if 'TT' in cl_data_cols or 'TE' in cl_data_cols: fields_use = 'T'
            if 'EE' in cl_data_cols or 'TE' in cl_data_cols: fields_use += ' E'
            if 'BB' in cl_data_cols: fields_use += ' B'
            if 'PP' in cl_data_cols and use_lensing: fields_use += ' P'
    else:
        fields_use = fields_use or 'P'

    if output_dir is None:
        output_dir = os.path.join(os.path.dirname(__file__), '..', 'data', output_root)
    if not os.path.exists(output_dir): os.makedirs(output_dir)

    dataset['fields_use'] = fields_use

    if use_CMB:
        fwhm = fwhm_arcmin / 60
        xlc = 180 * np.sqrt(8. * np.log(2.)) / np.pi
        sigma2 = (fwhm / xlc) ** 2
        noise_cols = 'TT           EE          BB'
        if use_lensing: noise_cols += '          PP'
    elif use_lensing:
        noise_cols = 'PP'
    noise_file = output_root + '_Noise.dat'
    with open(os.path.join(output_dir, noise_file), 'w') as f:
        f.write('#L %s\n' % noise_cols)

        for l in range(lmin, lmax + 1):
            NoiseCl = l * (l + 1.) / 2 / np.pi * NoiseVar * np.exp(l * (l + 1) * sigma2)
            noises = []
            if use_CMB: noises += [NoiseCl, ENoiseFac * NoiseCl, ENoiseFac * NoiseCl]
            if use_lensing: noises += [lens_recon_noise[l]]
            f.write("%d " % l + " ".join("%E" % elem for elem in noises) + "\n")

    dataset['fullsky_exact_fksy'] = fsky
    dataset['dataset_format'] = 'CMBLike2'
    dataset['like_approx'] = 'exact'

    dataset['cl_lmin'] = lmin
    dataset['cl_lmax'] = lmax

    dataset['binned'] = False

    dataset['cl_hat_includes_noise'] = False

    shutil.copy(input_cl_file, os.path.join(output_dir, output_root + '.dat'))
    dataset['cl_hat_file'] = output_root + '.dat'
    dataset['cl_noise_file'] = noise_file

    ini.saveFile(os.path.join(output_dir, output_root + '.dataset'))
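A hedged usage sketch for the function above (file names are illustrative; the fiducial CL file needs a 'TT TE EE BB PP'-style header line unless cl_data_cols is passed):

# Planck-like white-noise forecast: 30 muK-arcmin temperature noise,
# 7 arcmin Gaussian beam, 70% of the sky, multipoles 2..2500
make_forecast_cmb_dataset('fiducial_lensedCls.dat', 'my_sim1',
                          noise_muK_arcmin_T=30., fwhm_arcmin=7.,
                          lmax=2500, fsky=0.7)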
Example #9
pars = {
    'ombh2': 'omegabh2',
    'omch2': 'omegach2',
    'omnuh2': 'omeganuh2',
    'hubble': 'H0',
    'w': 'w',
    'helium_fraction': 'yheused',
    'scalar_amp(1)': 'A',
    'scalar_spectral_index(1)': 'ns',
    'scalar_nrun(1)': 'nrun',
    'initial_ratio(1)': 'r',
    're_optical_depth': 'tau',
    're_delta_redshift': 'deltazrei',
    'massless_neutrinos': 'nnu'
}

ini = IniFile()

ini.params['re_use_optical_depth'] = True
ini.params['temp_cmb'] = 2.7255
ini.params['CMB_outputscale'] = 2.7255e6**2.
ini.defaults.append('params.ini')

bf = types.BestFit(root + '.minimum',
                   setParamNameFile=root + '.paramnames',
                   want_fixed=True)

for camb, cosmomc in list(pars.items()):
    par = bf.parWithName(cosmomc)
    if par is not None: ini.params[camb] = par.best_fit

ini.params['scalar_amp(1)'] = float(ini.params['scalar_amp(1)']) / 1e9
Example #10
    def __init__(self, dataset, dataset_params={}, alpha_beta_names=['alpha', 'beta'],
                 marginalize=False, marginalize_params=_marge_params, precompute_covmats=True, silent=False):
        """

        :param dataset: .dataset file with settings
        :param dataset_params: dictionary of any parameters to override in the .dataset file
        :param alpha_beta_names: names of alpha and beta parameters if used and varied
        :param marginalize: Marginalize over alpha, beta by dumb grid integration (slow, but useful for importance sampling)
        :param marginalize_params: Dictionary of options for the grid marginalization
        :param precompute_covmats: if marginalizing, pre-compute covariance inverses at expense of memory (~600MB).
        :param silent:  Don't print out stuff
        """

        def relative_path(tag):
            name = ini.string(tag).replace('data/', '').replace('Pantheon/', '')
            if ini.original_filename is not None:
                return os.path.join(os.path.dirname(ini.original_filename), name)
            return name

        # has_absdist = F, intrinsicdisp=0, idispdataset=False
        if not silent: print('loading: %s' % dataset)
        ini = IniFile(dataset)
        ini.params.update(dataset_params)
        self.name = ini.string('name')
        data_file = relative_path('data_file')
        self.twoscriptmfit = ini.bool('twoscriptmfit')
        if self.twoscriptmfit:
            scriptmcut = ini.float('scriptmcut', 10.)

        assert not ini.float('intrinsicdisp', 0) and not ini.float('intrinsicdisp0', 0)
        self.alpha_beta_names = alpha_beta_names
        if alpha_beta_names is not None:
            self.alpha_name = alpha_beta_names[0]
            self.beta_name = alpha_beta_names[1]

        self.marginalize = marginalize

        self.pecz = ini.float('pecz', 0.001)

        cols = None
        self.has_third_var = False

        if not silent:
            print('Supernovae name: %s' % self.name)
            print('Reading %s' % data_file)
        supernovae = {}
        self.names = []
        ix = 0
        with io.open(data_file, 'r') as f:
            lines = f.readlines()
            for line in lines:
                if '#' in line:
                    cols = line[1:].split()
                    for rename, new in zip(['mb', 'color', 'x1', '3rdvar', 'd3rdvar', 'cov_m_s', 'cov_m_c', 'cov_s_c'],
                                           ['mag', 'colour', 'stretch', 'third_var', 'dthird_var', 'cov_mag_stretch',
                                            'cov_mag_colour', 'cov_stretch_colour']):
                        if rename in cols:
                            cols[cols.index(rename)] = new
                    self.has_third_var = 'third_var' in cols
                    zeros = np.zeros(len(lines) - 1)
                    self.third_var = zeros.copy()
                    self.dthird_var = zeros.copy()
                    self.set = zeros.copy()
                    for col in cols:
                        setattr(self, col, zeros.copy())
                elif line.strip():
                    if cols is None: raise Exception('Data file must have comment header')
                    vals = line.split()
                    for i, (col, val) in enumerate(zip(cols, vals)):
                        if col == 'name':
                            supernovae[val] = ix
                            self.names.append(val)
                        else:
                            getattr(self, col)[ix] = np.float64(val)
                    ix += 1

        self.z_var = self.dz ** 2
        self.mag_var = self.dmb ** 2
        self.stretch_var = self.dx1 ** 2
        self.colour_var = self.dcolor ** 2
        self.thirdvar_var = self.dthird_var ** 2
        self.nsn = ix
        if not silent: print('Number of SN read: %s ' % self.nsn)

        if self.twoscriptmfit and not self.has_third_var:
            raise Exception('twoscriptmfit was set but thirdvar information not present')

        if ini.bool('absdist_file'): raise Exception('absdist_file not supported')

        covmats = ['mag', 'stretch', 'colour', 'mag_stretch', 'mag_colour', 'stretch_colour']
        self.covs = {}
        for name in covmats:
            if ini.bool('has_%s_covmat' % name):
                if not silent: print('Reading covmat for: %s ' % name)
                self.covs[name] = self._read_covmat(relative_path('%s_covmat_file' % name))

        self.alphabeta_covmat = len(self.covs.items()) > 1 or self.covs.get('mag', None) is None
        self._last_alpha = np.inf
        self._last_beta = np.inf
        if alpha_beta_names is None and not marginalize: raise ValueError('Must give alpha, beta')
        assert self.covs

        # jla_prep
        zfacsq = 25.0 / np.log(10.0) ** 2
        self.pre_vars = self.mag_var + zfacsq * self.pecz ** 2 * (
                (1.0 + self.zcmb) / (self.zcmb * (1 + 0.5 * self.zcmb))) ** 2

        if self.twoscriptmfit:
            A1 = np.zeros(self.nsn)
            A2 = np.zeros(self.nsn)
            A1[self.third_var <= scriptmcut] = 1
            A2[self.third_var > scriptmcut] = 1
            has_A1 = np.any(A1)
            has_A2 = np.any(A2)
            if not has_A1:
                # swap
                A1 = A2
                A2 = np.zeros(self.nsn)
                has_A2 = False

            if not has_A2:
                self.twoscriptmfit = False
            self.A1 = A1
            self.A2 = A2

        if marginalize:
            self.marge_params = _marge_params.copy()
            self.marge_params.update(marginalize_params)
            self.step_width_alpha = self.marge_params['step_width_alpha']
            self.step_width_beta = self.marge_params['step_width_beta']
            _marge_steps = self.marge_params['marge_steps']
            self.alpha_grid = np.empty((2 * _marge_steps + 1) ** 2)
            self.beta_grid = self.alpha_grid.copy()
            _int_points = 0
            for alpha_i in range(-_marge_steps, _marge_steps + 1):
                for beta_i in range(-_marge_steps, _marge_steps + 1):
                    if alpha_i ** 2 + beta_i ** 2 <= _marge_steps ** 2:
                        self.alpha_grid[_int_points] = self.marge_params[
                                                           'alpha_centre'] + alpha_i * self.step_width_alpha
                        self.beta_grid[_int_points] = self.marge_params['beta_centre'] + beta_i * self.step_width_beta
                        _int_points += 1
            if not silent: print('Marginalizing alpha, beta over %s points' % _int_points)
            self.marge_grid = np.empty(_int_points)
            self.int_points = _int_points
            self.alpha_grid = self.alpha_grid[:_int_points]
            self.beta_grid = self.beta_grid[:_int_points]
            self.invcovs = np.empty(_int_points, dtype=object)
            if precompute_covmats:
                for i, (alpha, beta) in enumerate(zip(self.alpha_grid, self.beta_grid)):
                    self.invcovs[i] = self.inverse_covariance_matrix(alpha, beta)

        elif not self.alphabeta_covmat:
            self.inverse_covariance_matrix()
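A construction sketch for the class this __init__ belongs to (the class name and .dataset path are hypothetical):

# alpha and beta varied as external nuisance parameters
like = SN_likelihood('data/Pantheon/full_long.dataset')

# or marginalize over alpha, beta internally by grid integration
like_marge = SN_likelihood('data/Pantheon/full_long.dataset', marginalize=True, silent=True)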
Example #11
    def load_dataset(self, filename, dataset_params):
        from getdist import IniFile

        ini = IniFile(filename)
        ini.params.update(dataset_params)
        self.indices = []
        self.used_indices = []
        self.used_items = []
        self.fullcov = np.loadtxt(ini.relativeFileName('cov_file'))
        ntheta = ini.int('num_theta_bins')
        self.theta_bins = np.loadtxt(ini.relativeFileName('theta_bins_file'))
        self.iintrinsic_alignment_model = ini.string(
            'intrinsic_alignment_model')

        self.data_types = ini.string('data_types').split()
        self.used_types = ini.list('used_data_types', self.data_types)
        with open(ini.relativeFileName('data_selection')) as f:
            header = f.readline()
            assert ('#  type bin1 bin2 theta_min theta_max' == header.strip())
            lines = f.readlines()
        ranges = {}
        for tp in self.data_types:
            ranges[tp] = np.empty((6, 6), dtype=object)
        for line in lines:
            items = line.split()
            if items[0] in self.used_types:
                bin1, bin2 = [int(x) - 1 for x in items[1:3]]
                ranges[items[0]][bin1][bin2] = [
                    np.float64(x) for x in items[3:]
                ]

        self.ranges = ranges

        self.nzbins = ini.int('num_z_bins')  # for lensing sources
        self.nwbins = ini.int('num_gal_bins', 0)  # for galaxies
        maxbin = max(self.nzbins, self.nwbins)

        cov_ix = 0
        self.bin_pairs = []
        self.data_arrays = []
        self.thetas = []
        for i, tp in enumerate(self.data_types):
            xi = np.loadtxt(ini.relativeFileName('measurements[%s]' % tp))
            bin1 = xi[:, 0].astype(int) - 1
            bin2 = xi[:, 1].astype(int) - 1
            tbin = xi[:, 2].astype(int) - 1
            corr = np.empty((maxbin, maxbin), dtype=object)
            corr[:, :] = None
            self.data_arrays.append(corr)
            self.bin_pairs.append([])
            for f1, f2, ix, dat in zip(bin1, bin2, tbin, xi[:, 3]):
                self.indices.append((i, f1, f2, ix))
                if not (f1, f2) in self.bin_pairs[i]:
                    self.bin_pairs[i].append((f1, f2))
                    corr[f1, f2] = np.zeros(ntheta)
                corr[f1, f2][ix] = dat
                if ranges[tp][f1, f2] is not None:
                    mn, mx = ranges[tp][f1, f2]
                    if self.theta_bins[ix] > mn and self.theta_bins[ix] < mx:
                        self.thetas.append(self.theta_bins[ix])
                        self.used_indices.append(cov_ix)
                        self.used_items.append(self.indices[-1])
                cov_ix += 1

        nz_source = np.loadtxt(ini.relativeFileName('nz_file'))
        self.zmid = nz_source[:, 1]
        self.zbin_sp = []
        for b in range(self.nzbins):
            self.zbin_sp += [
                UnivariateSpline(self.zmid, nz_source[:, b + 3], s=0)
            ]

        nz_lens = np.loadtxt(ini.relativeFileName('nz_gal_file'))
        assert (np.array_equal(nz_lens[:, 1], self.zmid))
        self.zbin_w_sp = []
        for b in range(self.nwbins):
            self.zbin_w_sp += [
                UnivariateSpline(self.zmid, nz_lens[:, b + 3], s=0)
            ]

        self.zmax = self.zmid[-1]

        self.kmax = ini.float(
            'kmax', 15)  # Actually computed, assumes extrapolated beyond that
        self._initialize()
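A usage sketch (the instance, file name and override key are hypothetical):

like = DES_like()  # hypothetical class providing load_dataset and _initialize
like.load_dataset('DES_1YR_final.dataset', {'used_data_types': 'xip xim'})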
Example #12
def make_forecast_cmb_dataset(input_cl_file,
                              output_root,
                              output_dir=None,
                              noise_muK_arcmin_T=None,
                              noise_muK_arcmin_P=None,
                              NoiseVar=None,
                              ENoiseFac=2,
                              fwhm_arcmin=None,
                              lmin=2,
                              lmax=None,
                              fsky=1,
                              fields_use=None,
                              lens_recon_noise=None,
                              cl_data_cols=''):
    """
    Make a simulated .dataset and associated files with 'data' set at the input fiducial model.

    :param input_cl_file: input fiducial CL
    :param output_root: root name for output files, e.g. 'my_sim1'
    :param output_dir: output directory
    :param noise_muK_arcmin_T: temperature noise in muK-arcmin
    :param noise_muK_arcmin_P: polarization noise in muK-arcmin
    :param NoiseVar: effective isotropic noise variance for the temperature (N_L=NoiseVar with no beam)
    :param ENoiseFac: factor by which polarization noise variance is higher (usually 2; for Planck
                        about 4, as only half the detectors are polarized)
    :param fwhm_arcmin: beam fwhm in arcminutes
    :param lmin: l_min
    :param lmax: l_max
    :param fsky: sky fraction
    :param fields_use: optional list of fields to restrict to (e.g. 'T E')
    :param lens_recon_noise: optional array, starting at L=0, for the PP lensing reconstruction noise, in [L(L+1)]^2C_L^phi/2pi units
    :param cl_data_cols: if not specified in file header, order of columns in input CL file (e.g. 'TT TE EE BB PP')
    :return:
    """

    use_lensing = lens_recon_noise
    use_CMB = noise_muK_arcmin_T or NoiseVar is not None

    ini = IniFile()
    dataset = ini.params

    if not cl_data_cols:
        cl_data_cols = lastTopComment(input_cl_file)
        if not cl_data_cols:
            raise Exception(
                'input CL file must specify the names of columns (TT TE EE..)')
    else:
        dataset['cl_hat_order'] = cl_data_cols

    if use_CMB:
        if NoiseVar is None:
            if noise_muK_arcmin_T is None:
                raise ValueError('Must specify noise')
            NoiseVar = (noise_muK_arcmin_T * np.pi / 180 / 60.)**2
            if noise_muK_arcmin_P is not None:
                ENoiseFac = (noise_muK_arcmin_P / noise_muK_arcmin_T)**2
        elif noise_muK_arcmin_T is not None or noise_muK_arcmin_P is not None:
            raise ValueError('Specify either noise_muK_arcmin or NoiseVar')
        if not fields_use:
            fields_use = ''
            if 'TT' in cl_data_cols or 'TE' in cl_data_cols: fields_use = 'T'
            if 'EE' in cl_data_cols or 'TE' in cl_data_cols: fields_use += ' E'
            if 'BB' in cl_data_cols: fields_use += ' B'
            if 'PP' in cl_data_cols and use_lensing: fields_use += ' P'
    else:
        fields_use = fields_use or 'P'

    if output_dir is None:
        output_dir = os.path.join(os.path.dirname(__file__), '..', 'data',
                                  output_root)
    if not os.path.exists(output_dir): os.makedirs(output_dir)

    dataset['fields_use'] = fields_use

    if use_CMB:
        fwhm = fwhm_arcmin / 60
        xlc = 180 * np.sqrt(8. * np.log(2.)) / np.pi
        sigma2 = (fwhm / xlc)**2
        noise_cols = 'TT           EE          BB'
        if use_lensing: noise_cols += '          PP'
    elif use_lensing:
        noise_cols = 'PP'
    noise_file = output_root + '_Noise.dat'
    with open(os.path.join(output_dir, noise_file), 'w') as f:
        f.write('#L %s\n' % noise_cols)

        for l in range(lmin, lmax + 1):
            NoiseCl = l * (l + 1.) / 2 / np.pi * NoiseVar * np.exp(
                l * (l + 1) * sigma2)
            noises = []
            if use_CMB:
                noises += [NoiseCl, ENoiseFac * NoiseCl, ENoiseFac * NoiseCl]
            if use_lensing: noises += [lens_recon_noise[l]]
            f.write("%d " % l + " ".join("%E" % elem
                                         for elem in noises) + "\n")

    dataset['fullsky_exact_fksy'] = fsky
    dataset['dataset_format'] = 'CMBLike2'
    dataset['like_approx'] = 'exact'

    dataset['cl_lmin'] = lmin
    dataset['cl_lmax'] = lmax

    dataset['binned'] = False

    dataset['cl_hat_includes_noise'] = False

    shutil.copy(input_cl_file, os.path.join(output_dir, output_root + '.dat'))
    dataset['cl_hat_file'] = output_root + '.dat'
    dataset['cl_noise_file'] = noise_file

    ini.saveFile(os.path.join(output_dir, output_root + '.dataset'))
Example #13
    def __init__(self, dataset=dataDir, dataset_params={}, alpha_beta_names=['alpha', 'beta'],
                 marginalize=False, marginalize_params=_marge_params, precompute_covmats=True, silent=False):
        """

        :param dataset: .dataset file with settings
        :param dataset_params: dictionary of any parameters to override in the .dataset file
        :param alpha_beta_names: names of alpha and beta parameters if used and varied
        :param marginalize: Marginalize over alpha, beta by dumb grid integration (slow, but useful for importance sampling)
        :param marginalize_params: Dictionary of options for the grid marginalization
        :param precompute_covmats: if marginalizing, pre-compute covariance inverses at expense of memory (~600MB).
        :param silent:  Don't print out stuff
        """

        def relative_path(tag):
            name = ini.string(tag).replace('data/', '').replace('Pantheon/', '')
            if ini.original_filename is not None:
                return os.path.join(os.path.dirname(ini.original_filename), name)
            return name

        # has_absdist = F, intrinsicdisp=0, idispdataset=False
        if not silent: print('loading: %s' % dataset)
        ini = IniFile(dataset)
        ini.params.update(dataset_params)
        self.name = ini.string('name')
        data_file = relative_path('data_file')
        self.twoscriptmfit = ini.bool('twoscriptmfit')
        if self.twoscriptmfit:
            scriptmcut = ini.float('scriptmcut', 10.)

        assert not ini.float('intrinsicdisp', 0) and not ini.float('intrinsicdisp0', 0)
        self.alpha_beta_names = alpha_beta_names
        if alpha_beta_names is not None:
            self.alpha_name = alpha_beta_names[0]
            self.beta_name = alpha_beta_names[1]

        self.marginalize = marginalize

        self.pecz = ini.float('pecz', 0.001)

        cols = None
        self.has_third_var = False

        if not silent:
            print('Supernovae name: %s' % self.name)
            print('Reading %s' % data_file)
        supernovae = {}
        self.names = []
        ix = 0
        with io.open(data_file, 'r') as f:
            lines = f.readlines()
            for line in lines:
                if '#' in line:
                    cols = line[1:].split()
                    for rename, new in zip(['mb', 'color', 'x1', '3rdvar', 'd3rdvar', 'cov_m_s', 'cov_m_c', 'cov_s_c'],
                                           ['mag', 'colour', 'stretch', 'third_var', 'dthird_var', 'cov_mag_stretch',
                                            'cov_mag_colour', 'cov_stretch_colour']):
                        if rename in cols:
                            cols[cols.index(rename)] = new
                    self.has_third_var = 'third_var' in cols
                    zeros = np.zeros(len(lines) - 1)
                    self.third_var = zeros.copy()
                    self.dthird_var = zeros.copy()
                    self.set = zeros.copy()
                    for col in cols:
                        setattr(self, col, zeros.copy())
                elif line.strip():
                    if cols is None: raise Exception('Data file must have comment header')
                    vals = line.split()
                    for i, (col, val) in enumerate(zip(cols, vals)):
                        if col == 'name':
                            supernovae[val] = ix
                            self.names.append(val)
                        else:
                            getattr(self, col)[ix] = np.float64(val)
                    ix += 1

        self.z_var = self.dz ** 2
        self.mag_var = self.dmb ** 2
        self.stretch_var = self.dx1 ** 2
        self.colour_var = self.dcolor ** 2
        self.thirdvar_var = self.dthird_var ** 2
        self.nsn = ix
        if not silent: print('Number of SN read: %s ' % self.nsn)

        if self.twoscriptmfit and not self.has_third_var:
            raise Exception('twoscriptmfit was set but thirdvar information not present')

        if ini.bool('absdist_file'): raise Exception('absdist_file not supported')

        covmats = ['mag', 'stretch', 'colour', 'mag_stretch', 'mag_colour', 'stretch_colour']
        self.covs = {}
        for name in covmats:
            if ini.bool('has_%s_covmat' % name):
                if not silent: print('Reading covmat for: %s ' % name)
                self.covs[name] = self._read_covmat(relative_path('%s_covmat_file' % name))

        self.alphabeta_covmat = len(self.covs.items()) > 1 or self.covs.get('mag', None) is None
        self._last_alpha = np.inf
        self._last_beta = np.inf
        if alpha_beta_names is None and not marginalize: raise ValueError('Must give alpha, beta')
        assert self.covs

        # jla_prep
        zfacsq = 25.0 / np.log(10.0) ** 2
        self.pre_vars = self.mag_var + zfacsq * self.pecz ** 2 * (
                (1.0 + self.zcmb) / (self.zcmb * (1 + 0.5 * self.zcmb))) ** 2

        if self.twoscriptmfit:
            A1 = np.zeros(self.nsn)
            A2 = np.zeros(self.nsn)
            A1[self.third_var <= scriptmcut] = 1
            A2[self.third_var > scriptmcut] = 1
            has_A1 = np.any(A1)
            has_A2 = np.any(A2)
            if not has_A1:
                # swap
                A1 = A2
                A2 = np.zeros(self.nsn)
                has_A2 = False

            if not has_A2:
                self.twoscriptmfit = False
            self.A1 = A1
            self.A2 = A2

        if marginalize:
            self.marge_params = _marge_params.copy()
            self.marge_params.update(marginalize_params)
            self.step_width_alpha = self.marge_params['step_width_alpha']
            self.step_width_beta = self.marge_params['step_width_beta']
            _marge_steps = self.marge_params['marge_steps']
            self.alpha_grid = np.empty((2 * _marge_steps + 1) ** 2)
            self.beta_grid = self.alpha_grid.copy()
            _int_points = 0
            for alpha_i in range(-_marge_steps, _marge_steps + 1):
                for beta_i in range(-_marge_steps, _marge_steps + 1):
                    if alpha_i ** 2 + beta_i ** 2 <= _marge_steps ** 2:
                        self.alpha_grid[_int_points] = self.marge_params[
                                                           'alpha_centre'] + alpha_i * self.step_width_alpha
                        self.beta_grid[_int_points] = self.marge_params['beta_centre'] + beta_i * self.step_width_beta
                        _int_points += 1
            if not silent: print('Marginalizing alpha, beta over %s points' % _int_points)
            self.marge_grid = np.empty(_int_points)
            self.int_points = _int_points
            self.alpha_grid = self.alpha_grid[:_int_points]
            self.beta_grid = self.beta_grid[:_int_points]
            self.invcovs = np.empty(_int_points, dtype=object)
            if precompute_covmats:
                for i, (alpha, beta) in enumerate(zip(self.alpha_grid, self.beta_grid)):
                    self.invcovs[i] = self.inverse_covariance_matrix(alpha, beta)

        elif not self.alphabeta_covmat:
            self.inverse_covariance_matrix()
Example #14
def main(args):
    no_plots = False
    chain_root = args.chain_root
    if args.ini_file is None and chain_root is None:
        doError('Must give either a .ini file of parameters or a chain file root name. Run "GetDist.py -h" for help.')
    if not ".ini" in args.ini_file and chain_root is None:
        # use default settings acting on chain_root, no plots
        chain_root = args.ini_file
        args.ini_file = getdist.default_getdist_settings
        no_plots = True
    if not os.path.isfile(args.ini_file):
        doError("Parameter file does not exist: " + args.ini_file)
    if chain_root and chain_root.endswith(".txt"):
        chain_root = chain_root[:-4]

    # Input parameters
    ini = IniFile(args.ini_file)

    # File root
    if chain_root is not None:
        in_root = chain_root
    else:
        in_root = ini.params["file_root"]
    if not in_root:
        doError("Chain Root file name not given ")
    rootname = os.path.basename(in_root)

    if args.ignore_rows is not None:
        ignorerows = args.ignore_rows
    else:
        ignorerows = ini.float("ignore_rows", 0.0)

    samples_are_chains = ini.bool("samples_are_chains", True)

    # Create instance of MCSamples
    mc = MCSamples(in_root, files_are_chains=samples_are_chains)

    mc.initParameters(ini)

    if ini.bool("adjust_priors", False) or ini.bool("map_params", False):
        doError(
            "To adjust priors or define new parameters, use a separate python script; see the python getdist docs for examples"
        )

    plot_ext = ini.string("plot_ext", "py")
    finish_run_command = ini.string("finish_run_command", "")

    no_plots = ini.bool("no_plots", no_plots)
    plots_only = ini.bool("plots_only", False)
    no_tests = plots_only or ini.bool("no_tests", False)

    thin_factor = ini.int("thin_factor", 0)
    thin_cool = ini.float("thin_cool", 1.0)

    make_single_samples = ini.bool("make_single_samples", False)
    single_thin = ini.int("single_thin", 1)
    cool = ini.float("cool", 1.0)

    chain_exclude = ini.int_list("exclude_chain")

    shade_meanlikes = ini.bool("shade_meanlikes", False)
    plot_meanlikes = ini.bool("plot_meanlikes", False)

    out_dir = ini.string("out_dir", "./")
    if out_dir:
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)
        print("producing files in directory ", out_dir)
    mc.out_dir = out_dir

    out_root = ini.string("out_root", "")
    if out_root:
        rootname = out_root
        print("producing files with with root ", out_root)
    mc.rootname = rootname

    rootdirname = os.path.join(out_dir, rootname)
    mc.rootdirname = rootdirname

    if "do_minimal_1d_intervals" in ini.params:
        doError("do_minimal_1d_intervals no longer used; set credible_interval_threshold instead")

    line = ini.string("PCA_params", "")
    if line.lower() == "all":
        PCA_params = mc.paramNames.list()
    else:
        PCA_params = line.split()
    PCA_num = ini.int("PCA_num", len(PCA_params))
    if PCA_num != 0:
        if PCA_num < 2:
            doError("Can only do PCA for 2 or more parameters")
        PCA_func = ini.string("PCA_func", "")
        # Characters representing functional mapping
        if PCA_func == "":
            PCA_func = ["N"] * PCA_num  # No mapping
        PCA_NormParam = ini.string("PCA_normparam", "") or None

    make_scatter_samples = ini.bool("make_scatter_samples", False)

    # ==============================================================================

    first_chain = ini.int("first_chain", 0)
    last_chain = ini.int("chain_num", -1)
    # -1 means keep reading until one not found

    # Chain files
    chain_files = chains.chainFiles(
        in_root, first_chain=first_chain, last_chain=last_chain, chain_exclude=chain_exclude
    )

    mc.loadChains(in_root, chain_files)

    mc.removeBurnFraction(ignorerows)
    mc.deleteFixedParams()
    mc.makeSingle()

    def filterParList(namestring, num=None):
        if not namestring.strip():
            pars = mc.paramNames.list()
        else:
            pars = []
            for name in namestring.split():
                if "?" in name or "*" in name:
                    pars += mc.paramNames.getMatches(name, strings=True)
                elif mc.paramNames.parWithName(name):
                    pars.append(name)
        if num is not None and len(pars) != num:
            raise Exception("%iD plot has missing parameter or wrong number of parameters: %s" % (num, pars))
        return pars

    if cool != 1:
        print("Cooling chains by ", cool)
        mc.cool(cool)

    mc.updateBaseStatistics()

    if not no_tests:
        mc.getConvergeTests(mc.converge_test_limit, writeDataToFile=True, feedback=True)

    mc.writeCovMatrix()
    mc.writeCorrelationMatrix()

    # Output thinned data if requested
    # Must do this with unsorted output
    if thin_factor != 0:
        thin_ix = mc.thin_indices(thin_factor)
        filename = rootdirname + "_thin.txt"
        mc.writeThinData(filename, thin_ix, thin_cool)

    print(mc.getNumSampleSummaryText().strip())
    if mc.likeStats:
        print(mc.likeStats.likeSummary().strip())

    if PCA_num > 0 and not plots_only:
        mc.PCA(PCA_params, PCA_func, PCA_NormParam, writeDataToFile=True)

    if not no_plots:
        # set plot_data_dir before we generate the 1D densities below
        plot_data_dir = ini.string("plot_data_dir", default="", allowEmpty=True)
        if plot_data_dir and not os.path.isdir(plot_data_dir):
            os.mkdir(plot_data_dir)
    else:
        plot_data_dir = None
    mc.plot_data_dir = plot_data_dir

    # Do 1D bins
    mc._setDensitiesandMarge1D(writeDataToFile=not no_plots and plot_data_dir, meanlikes=plot_meanlikes)

    if not no_plots:
        # Output files for 1D plots
        print("Calculating plot data...")

        plotparams = []
        line = ini.string("plot_params", "")
        if line not in ["", "0"]:
            plotparams = filterParList(line)

        line = ini.string("plot_2D_param", "").strip()
        plot_2D_param = None
        if line and line != "0":
            plot_2D_param = line

        cust2DPlots = []
        if not plot_2D_param:
            # Use custom array of specific plots
            num_cust2D_plots = ini.int("plot_2D_num", 0)
            for i in range(1, num_cust2D_plots + 1):
                line = ini.string("plot" + str(i))
                pars = filterParList(line, 2)
                cust2DPlots.append(pars)

        triangle_params = []
        triangle_plot = ini.bool("triangle_plot", False)
        if triangle_plot:
            line = ini.string("triangle_params", "")
            triangle_params = filterParList(line)
            triangle_num = len(triangle_params)
            triangle_plot = triangle_num > 1

        num_3D_plots = ini.int("num_3D_plots", 0)
        plot_3D = []
        for ix in range(1, num_3D_plots + 1):
            line = ini.string("3D_plot" + str(ix))
            plot_3D.append(filterParList(line, 3))

        # Produce file of weight-1 samples if requested
        if (num_3D_plots and not make_single_samples or make_scatter_samples) and not no_plots:
            make_single_samples = True
            single_thin = max(1, int(round(mc.norm / mc.max_mult)) // mc.max_scatter_points)

        if plot_data_dir:
            if make_single_samples:
                filename = os.path.join(plot_data_dir, rootname.strip() + "_single.txt")
                mc.makeSingleSamples(filename, single_thin)

            # Write paramNames file
            mc.getParamNames().saveAsText(os.path.join(plot_data_dir, rootname + ".paramnames"))
            mc.getBounds().saveToFile(os.path.join(plot_data_dir, rootname + ".bounds"))

        make_plots = ini.bool("make_plots", False)

        done2D = {}

        filename = rootdirname + "." + plot_ext
        mc.writeScriptPlots1D(filename, plotparams)
        if make_plots:
            runScript(filename)

        # Do 2D bins
        if plot_2D_param == "corr":
            # In this case output the most correlated variable combinations
            print("...doing 2D plots for most correlated variables")
            cust2DPlots = mc.getCorrelatedVariable2DPlots()
            plot_2D_param = None
        elif plot_2D_param:
            mc.paramNames.parWithName(plot_2D_param, error=True)  # just check

        if cust2DPlots or plot_2D_param:
            print("...producing 2D plots")
            filename = rootdirname + "_2D." + plot_ext
            done2D = mc.writeScriptPlots2D(
                filename, plot_2D_param, cust2DPlots, writeDataToFile=plot_data_dir, shade_meanlikes=shade_meanlikes
            )
            if make_plots:
                runScript(filename)

        if triangle_plot:
            # Add the off-diagonal 2D plots
            print("...producing triangle plot")
            filename = rootdirname + "_tri." + plot_ext
            mc.writeScriptPlotsTri(filename, triangle_params)
            for i, p2 in enumerate(triangle_params):
                for p1 in triangle_params[i + 1 :]:
                    if not done2D.get((p1, p2)) and plot_data_dir:
                        mc.get2DDensityGridData(p1, p2, writeDataToFile=True, meanlikes=shade_meanlikes)
            if make_plots:
                runScript(filename)

        # Do 3D plots (i.e. 2D scatter plots with coloured points)
        if num_3D_plots:
            print("...producing ", num_3D_plots, "2D colored scatter plots")
            filename = rootdirname + "_3D." + plot_ext
            mc.writeScriptPlots3D(filename, plot_3D)
            if make_plots:
                runScript(filename)

    if not plots_only:
        # Write out stats marginalized
        mc.getMargeStats().saveAsText(rootdirname + ".margestats")

        # Limits from global likelihood
        if mc.loglikes is not None:
            mc.getLikeStats().saveAsText(rootdirname + ".likestats")

    # System command
    if finish_run_command:
        finish_run_command = finish_run_command.replace("%ROOTNAME%", rootname)
        finish_run_command = finish_run_command.replace("%PLOTDIR%", plot_data_dir)
        finish_run_command = finish_run_command.replace("%PLOTROOT%", os.path.join(plot_data_dir, rootname))
        os.system(finish_run_command)
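The keys read above can be collected in a small parameter file and run as "GetDist.py mypars.ini". A sketch with illustrative values:

file_root = chains/test
ignore_rows = 0.3
no_plots = F
triangle_plot = T
triangle_params = x y
plot_2D_num = 1
plot1 = x y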
Example #15
if args.plot_data is None:
    data_dir = batch.batchPath + 'plot_data' + os.sep
else:
    data_dir = os.path.abspath(args.plot_data) + os.sep
ini_dir = batch.batchPath + 'getdist' + os.sep

checkDir(data_dir)
checkDir(ini_dir)

if args.delay: time.sleep(args.delay)
processes = set()

if not args.plots:
    for jobItem in Opts.filteredBatchItems():
        ini = IniFile()
        ini.params['file_root'] = jobItem.chainRoot
        checkDir(jobItem.distPath)
        ini.params['out_dir'] = jobItem.distPath
        ini.params['plot_data_dir'] = data_dir
        custom_plot = batch.commonPath + 'plots' + os.sep + jobItem.paramtag + '.ini'
        custom_plot2 = batch.commonPath + 'plots' + os.sep + jobItem.name + '.ini'
        if os.path.exists(custom_plot2):
            ini.includes.append(custom_plot2)
        elif os.path.exists(custom_plot):
            ini.includes.append(custom_plot)
        ini.defaults.append(batch.commonPath + base_ini)
        tag = ''
        if jobItem.isImportanceJob or args.burn_removed or jobItem.isBurnRemoved():
            ini.params['ignore_rows'] = 0
        if jobItem.isImportanceJob:
Example #16
    def load_dataset(self, filename, dataset_params):
        from getdist import IniFile

        ini = IniFile(filename)
        ini.params.update(dataset_params)
        self.indices = []
        self.used_indices = []
        self.used_items = []
        self.fullcov = np.loadtxt(ini.relativeFileName('cov_file'))
        ntheta = ini.int('num_theta_bins')
        self.theta_bins = np.loadtxt(ini.relativeFileName('theta_bins_file'))
        self.iintrinsic_alignment_model = ini.string('intrinsic_alignment_model')

        self.data_types = ini.string('data_types').split()
        self.used_types = ini.list('used_data_types', self.data_types)
        with open(ini.relativeFileName('data_selection')) as f:
            header = f.readline()
            assert ('#  type bin1 bin2 theta_min theta_max' == header.strip())
            lines = f.readlines()
        ranges = {}
        for tp in self.data_types:
            ranges[tp] = np.empty((6, 6), dtype=object)
        for line in lines:
            items = line.split()
            if items[0] in self.used_types:
                bin1, bin2 = [int(x) - 1 for x in items[1:3]]
                ranges[items[0]][bin1][bin2] = [np.float64(x) for x in items[3:]]

        self.ranges = ranges

        self.nzbins = ini.int('num_z_bins')  # for lensing sources
        self.nwbins = ini.int('num_gal_bins', 0)  # for galaxies
        maxbin = max(self.nzbins, self.nwbins)

        cov_ix = 0
        self.bin_pairs = []
        self.data_arrays = []
        self.thetas = []
        for i, tp in enumerate(self.data_types):
            xi = np.loadtxt(ini.relativeFileName('measurements[%s]' % tp))
            bin1 = xi[:, 0].astype(int) - 1
            bin2 = xi[:, 1].astype(int) - 1
            tbin = xi[:, 2].astype(int) - 1
            corr = np.empty((maxbin, maxbin), dtype=object)
            corr[:, :] = None
            self.data_arrays.append(corr)
            self.bin_pairs.append([])
            for f1, f2, ix, dat in zip(bin1, bin2, tbin, xi[:, 3]):
                self.indices.append((i, f1, f2, ix))
                if not (f1, f2) in self.bin_pairs[i]:
                    self.bin_pairs[i].append((f1, f2))
                    corr[f1, f2] = np.zeros(ntheta)
                corr[f1, f2][ix] = dat
                if ranges[tp][f1, f2] is not None:
                    mn, mx = ranges[tp][f1, f2]
                    if self.theta_bins[ix] > mn and self.theta_bins[ix] < mx:
                        self.thetas.append(self.theta_bins[ix])
                        self.used_indices.append(cov_ix)
                        self.used_items.append(self.indices[-1])
                cov_ix += 1

        nz_source = np.loadtxt(ini.relativeFileName('nz_file'))
        self.zmid = nz_source[:, 1]
        self.zbin_sp = []
        for b in range(self.nzbins):
            self.zbin_sp += [UnivariateSpline(self.zmid, nz_source[:, b + 3], s=0)]

        nz_lens = np.loadtxt(ini.relativeFileName('nz_gal_file'))
        assert (np.array_equal(nz_lens[:, 1], self.zmid))
        self.zbin_w_sp = []
        for b in range(self.nwbins):
            self.zbin_w_sp += [UnivariateSpline(self.zmid, nz_lens[:, b + 3], s=0)]

        self.zmax = self.zmid[-1]

        self.kmax = ini.float('kmax', 15)  # Actually computed, assumes extrapolated beyond that
        self._initialize()
Example #17
if args.plot_data is None:
    data_dir = batch.batchPath + 'plot_data' + os.sep
else:
    data_dir = os.path.abspath(args.plot_data) + os.sep
ini_dir = batch.batchPath + 'getdist' + os.sep

checkDir(data_dir)
checkDir(ini_dir)

if args.delay: time.sleep(args.delay)
processes = set()

if not args.plots:
    for jobItem in Opts.filteredBatchItems():
        ini = IniFile()
        ini.params['file_root'] = jobItem.chainRoot
        checkDir(jobItem.distPath)
        ini.params['out_dir'] = jobItem.distPath
        ini.params['plot_data_dir'] = data_dir
        custom_plot = batch.commonPath + 'plots' + os.sep + jobItem.paramtag + '.ini'
        custom_plot2 = batch.commonPath + 'plots' + os.sep + jobItem.name + '.ini'
        if os.path.exists(custom_plot2):
            ini.includes.append(custom_plot2)
        elif os.path.exists(custom_plot):
            ini.includes.append(custom_plot)
        ini.defaults.append(batch.commonPath + base_ini)
        tag = ''
        if jobItem.isImportanceJob or args.burn_removed or jobItem.isBurnRemoved():
            ini.params['ignore_rows'] = 0
Example #18
 def loadDataset(self, froot):
     if '.dataset' not in froot: froot += '.dataset'
     ini = IniFile(froot)
     self.readIni(ini)
Example #19
    def initialize(self):
        def relative_path(tag):
            return ini.relativeFileName(tag).replace('data/', '').replace(
                'Pantheon/', '')

        # has_absdist = F, intrinsicdisp=0, idispdataset=False
        if not self.path:
            if self.path_install:
                from importlib import import_module
                self.path = getattr(
                    import_module(_package + ".likelihoods." + self.name,
                                  package=_package),
                    "get_path")(self.path_install)
            else:
                self.log.error(
                    "No path given to the %s likelihood. Set the likelihood"
                    " property 'path' or the common property '%s'.",
                    self.dataset_file, _path_install)
                raise HandledException
        self.path = os.path.normpath(self.path)
        self.dataset_file_path = os.path.normpath(
            os.path.join(self.path, self.dataset_file))
        self.log.info("Reading data from %s", self.dataset_file_path)
        if not os.path.exists(self.dataset_file_path):
            self.log.error(
                "The likelihood is not installed in the given path: "
                "cannot find the file '%s'.", self.dataset_file_path)
            raise HandledException
        ini = IniFile(self.dataset_file_path)
        ini.params.update(self.dataset_params or {})
        self.twoscriptmfit = ini.bool('twoscriptmfit')
        if self.twoscriptmfit:
            scriptmcut = ini.float('scriptmcut', 10.)
        assert not ini.float('intrinsicdisp', 0) and not ini.float(
            'intrinsicdisp0', 0)
        if hasattr(self, "alpha_beta_names"):
            self.alpha_name = self.alpha_beta_names[0]
            self.beta_name = self.alpha_beta_names[1]
        self.pecz = ini.float('pecz', 0.001)
        cols = None
        self.has_third_var = False
        data_file = os.path.join(self.path, ini.string("data_file"))
        self.log.debug('Reading %s' % data_file)
        supernovae = {}
        self.names = []
        ix = 0
        with io.open(data_file, 'r') as f:
            lines = f.readlines()
            for line in lines:
                if '#' in line:
                    cols = line[1:].split()
                    for rename, new in zip([
                            'mb', 'color', 'x1', '3rdvar', 'd3rdvar',
                            'cov_m_s', 'cov_m_c', 'cov_s_c'
                    ], [
                            'mag', 'colour', 'stretch', 'third_var',
                            'dthird_var', 'cov_mag_stretch', 'cov_mag_colour',
                            'cov_stretch_colour'
                    ]):
                        if rename in cols:
                            cols[cols.index(rename)] = new
                    self.has_third_var = 'third_var' in cols
                    zeros = np.zeros(len(lines) - 1)
                    self.third_var = zeros.copy()
                    self.dthird_var = zeros.copy()
                    self.set = zeros.copy()
                    for col in cols:
                        setattr(self, col, zeros.copy())
                elif line.strip():
                    if cols is None:
                        self.log.error('Data file must have comment header')
                        raise HandledException
                    vals = line.split()
                    for i, (col, val) in enumerate(zip(cols, vals)):
                        if col == 'name':
                            supernovae[val] = ix
                            self.names.append(val)
                        else:
                            getattr(self, col)[ix] = np.float64(val)
                    ix += 1
        self.z_var = self.dz**2
        self.mag_var = self.dmb**2
        self.stretch_var = self.dx1**2
        self.colour_var = self.dcolor**2
        self.thirdvar_var = self.dthird_var**2
        self.nsn = ix
        self.log.debug('Number of SN read: %s ' % self.nsn)
        if self.twoscriptmfit and not self.has_third_var:
            self.log.error(
                'twoscriptmfit was set but thirdvar information not present')
            raise HandledException
        if ini.bool('absdist_file'):
            self.log.error('absdist_file not supported')
            raise HandledException
        covmats = [
            'mag', 'stretch', 'colour', 'mag_stretch', 'mag_colour',
            'stretch_colour'
        ]
        self.covs = {}
        for name in covmats:
            if ini.bool('has_%s_covmat' % name):
                self.log.debug('Reading covmat for: %s ' % name)
                self.covs[name] = self._read_covmat(
                    os.path.join(self.path,
                                 ini.string('%s_covmat_file' % name)))
        self.alphabeta_covmat = (len(self.covs.items()) > 1
                                 or self.covs.get('mag', None) is None)
        self._last_alpha = np.inf
        self._last_beta = np.inf
        self.marginalize = getattr(self, "marginalize", False)
        assert self.covs
        # jla_prep
        zfacsq = 25.0 / np.log(10.0)**2
        self.pre_vars = self.mag_var + zfacsq * self.pecz**2 * (
            (1.0 + self.zcmb) / (self.zcmb * (1 + 0.5 * self.zcmb)))**2
        if self.twoscriptmfit:
            A1 = np.zeros(self.nsn)
            A2 = np.zeros(self.nsn)
            A1[self.third_var <= scriptmcut] = 1
            A2[self.third_var > scriptmcut] = 1
            has_A1 = np.any(A1)
            has_A2 = np.any(A2)
            if not has_A1:
                # swap
                A1 = A2
                A2 = np.zeros(self.nsn)
                has_A2 = False
            if not has_A2:
                self.twoscriptmfit = False
            self.A1 = A1
            self.A2 = A2
        if self.marginalize:
            self.step_width_alpha = self.marginalize_params['step_width_alpha']
            self.step_width_beta = self.marginalize_params['step_width_beta']
            _marge_steps = self.marginalize_params['marge_steps']
            self.alpha_grid = np.empty((2 * _marge_steps + 1)**2)
            self.beta_grid = self.alpha_grid.copy()
            _int_points = 0
            for alpha_i in range(-_marge_steps, _marge_steps + 1):
                for beta_i in range(-_marge_steps, _marge_steps + 1):
                    if alpha_i**2 + beta_i**2 <= _marge_steps**2:
                        self.alpha_grid[_int_points] = (
                            self.marginalize_params['alpha_centre'] +
                            alpha_i * self.step_width_alpha)
                        self.beta_grid[_int_points] = (
                            self.marginalize_params['beta_centre'] +
                            beta_i * self.step_width_beta)
                        _int_points += 1
            self.log.debug('Marginalizing alpha, beta over %s points' %
                           _int_points)
            self.marge_grid = np.empty(_int_points)
            self.int_points = _int_points
            self.alpha_grid = self.alpha_grid[:_int_points]
            self.beta_grid = self.beta_grid[:_int_points]
            self.invcovs = np.empty(_int_points, dtype=object)
            if self.precompute_covmats:
                for i, (alpha,
                        beta) in enumerate(zip(self.alpha_grid,
                                               self.beta_grid)):
                    self.invcovs[i] = self.inverse_covariance_matrix(
                        alpha, beta)
        elif not self.alphabeta_covmat:
            self.inverse_covariance_matrix()
Example No. 20
def make_forecast_cmb_dataset(fiducial_Cl,
                              output_root,
                              output_dir=None,
                              noise_muK_arcmin_T=None,
                              noise_muK_arcmin_P=None,
                              NoiseVar=None,
                              ENoiseFac=2,
                              fwhm_arcmin=None,
                              lmin=2,
                              lmax=None,
                              fsky=1.0,
                              lens_recon_noise=None,
                              cl_dict_lmin=0):  # pragma: no cover
    """
    Make a simulated .dataset and associated files with 'data' set at the input fiducial
    model. Uses the exact full-sky log-likelihood, scaled by fsky.

    If you want to use numerical N_L CMB noise files, you can just replace the noise
    .dat text file produced by this function.

    :param fiducial_Cl: dictionary of Cls to use, combination of tt, te, ee, bb, pp;
                        note te must be included with tt and ee when using them
    :param output_root: root name for output files, e.g. 'my_sim1'
    :param output_dir: output directory
    :param noise_muK_arcmin_T: temperature noise in muK-arcmin
    :param noise_muK_arcmin_P: polarization noise in muK-arcmin
    :param NoiseVar: alternatively if noise_muK_arcmin_T is None, effective
        isotropic noise variance for the temperature (N_L=NoiseVar with no beam)
    :param ENoiseFac: factor by which the polarization noise variance is higher
        than NoiseVar (usually 2; for Planck about 4, as only half the
        detectors are polarized)
    :param fwhm_arcmin: beam fwhm in arcminutes
    :param lmin: l_min
    :param lmax: l_max
    :param fsky: sky fraction
    :param lens_recon_noise: optional array, starting at L=0, for the
       pp lensing reconstruction noise, in [L(L+1)]^2C_L^phi/2pi units
    :param cl_dict_lmin: l_min for the arrays in fiducial_Cl
    :return: IniFile that was saved
    """
    ini = IniFile()
    dataset = ini.params

    cl_keys = fiducial_Cl.keys()
    use_CMB = set(cl_keys).intersection(set(CMB_keys))
    use_lensing = lens_recon_noise is not None

    if use_CMB:
        if NoiseVar is None:
            if noise_muK_arcmin_T is None:
                raise ValueError('Must specify noise')
            NoiseVar = white_noise_from_muK_arcmin(noise_muK_arcmin_T)
            if noise_muK_arcmin_P is not None:
                ENoiseFac = (noise_muK_arcmin_P / noise_muK_arcmin_T)**2
        elif noise_muK_arcmin_T is not None or noise_muK_arcmin_P is not None:
            raise ValueError('Specify either noise_muK_arcmin_T or NoiseVar, not both')
        fields_use = ''
        if 'tt' in cl_keys or 'te' in cl_keys:
            fields_use = 'T'
        if 'ee' in cl_keys or 'te' in cl_keys:
            fields_use += ' E'
        if 'bb' in cl_keys:
            fields_use += ' B'
        if 'pp' in cl_keys and use_lensing:
            fields_use += ' P'
        if 'tt' in cl_keys and 'ee' in cl_keys and 'te' not in cl_keys:
            raise ValueError(
                'Input power spectra should have te if using tt and ee -'
                'using the exact likelihood requires the full covariance.')
    else:
        fields_use = 'P'

    if output_dir is None:
        output_dir = '.'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataset['fields_use'] = fields_use

    if use_CMB:
        fwhm = fwhm_arcmin / 60
        xlc = 180 * np.sqrt(8. * np.log(2.)) / np.pi
        sigma2 = (fwhm / xlc)**2
        noise_cols = 'TT           EE          BB'
        if use_lensing:
            noise_cols += '          PP'
    elif use_lensing:
        noise_cols = 'PP'
    else:
        raise ValueError('Must use CMB or lensing C_L')
    noise_file = output_root + '_Noise.dat'
    with open(os.path.join(output_dir, noise_file), 'w') as f:
        f.write('#L %s\n' % noise_cols)

        for ell in range(lmin, lmax + 1):
            noises = []
            if use_CMB:
                # noinspection PyUnboundLocalVariable
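                # beam-deconvolved white noise:
                # N_l = l(l+1)/(2 pi) * NoiseVar * exp(l(l+1) sigma^2)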
                noise_cl = ell * (ell + 1.) / 2 / np.pi * NoiseVar * np.exp(
                    ell * (ell + 1) * sigma2)
                noises += [
                    noise_cl, ENoiseFac * noise_cl, ENoiseFac * noise_cl
                ]
            if use_lensing:
                noises += [lens_recon_noise[ell]]
            f.write("%d " % ell + " ".join("%E" % elem
                                           for elem in noises) + "\n")

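    # note: 'fksy' (sic) is the key name the CMBlikes reader expects, so the
    # spelling is kept as-is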
    dataset['fullsky_exact_fksy'] = fsky
    dataset['dataset_format'] = 'CMBLike2'
    dataset['like_approx'] = 'exact'

    dataset['cl_lmin'] = lmin
    dataset['cl_lmax'] = lmax

    dataset['binned'] = False

    dataset['cl_hat_includes_noise'] = False

    save_cl_dict(os.path.join(output_dir, output_root + '.dat'),
                 fiducial_Cl,
                 cl_dict_lmin=cl_dict_lmin)
    dataset['cl_hat_file'] = output_root + '.dat'
    dataset['cl_noise_file '] = noise_file

    ini.saveFile(os.path.join(output_dir, output_root + '.dataset'))
    return ini
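
A minimal usage sketch (toy spectra and hypothetical file names; in practice the
fiducial Cls would come from e.g. a CAMB calculation):

import numpy as np

lmax = 2000
ls = np.arange(lmax + 1, dtype=np.float64)
# toy l(l+1)C_l/2pi spectra starting at l=0 (cl_dict_lmin defaults to 0)
shape = 1e3 * ls / (ls + 200.) ** 2
fiducial_Cl = {'tt': shape, 'te': 0.3 * shape, 'ee': 0.1 * shape}
ini = make_forecast_cmb_dataset(fiducial_Cl, 'my_sim1', output_dir='./forecast_data',
                                noise_muK_arcmin_T=30., noise_muK_arcmin_P=60.,
                                fwhm_arcmin=7., lmax=lmax, fsky=0.6)
print(ini.params['fields_use'])  # -> 'T E'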
Example No. 21
def getdist_script(args, exit_on_error=True):
    def do_error(msg):
        if exit_on_error:
            print(msg)
            sys.exit()
        raise ValueError(msg)

    result = []

    def doprint(*s):
        result.append(" ".join([str(x) for x in s]))
        print(*s)

    no_plots = False
    chain_root = args.chain_root
    if args.ini_file is None and chain_root is None:
        do_error(
            'Must give either a .ini file of parameters or a chain file root name. Run "getdist -h" for help.'
        )
    if '.ini' not in args.ini_file and chain_root is None:
        # use default settings acting on chain_root, no plots
        chain_root = args.ini_file
        args.ini_file = getdist.default_getdist_settings
        no_plots = True
    if not os.path.isfile(args.ini_file):
        do_error('Parameter file does not exist: ' + args.ini_file)
    if chain_root and chain_root.endswith('.txt'):
        chain_root = chain_root[:-4]

    if chain_root is not None and ('*' in chain_root or '?' in chain_root):
        import glob
        import copy
        for ending in ['.paramnames', 'updated.yaml']:
            for f in glob.glob(chain_root + ending):
                fileargs = copy.copy(args)
                fileargs.chain_root = f.replace(ending, '')
                getdist_script(fileargs)
        return

    # Input parameters
    ini = IniFile(args.ini_file)

    for item in set(ini.params.keys()).intersection({
            'make_single_samples', 'single_thin', 'dump_ND_bins',
            'plot_meanlikes', 'shade_meanlikes', 'plot_data_dir',
            'force_twotail'
    }):
        if ini.string(item) not in ['0', 'F']:
            logging.warning(
                '%s is no longer supported by getdist, value ignored' % item)

    # File root
    if chain_root is not None:
        in_root = chain_root
    else:
        in_root = ini.params['file_root']
    if not in_root:
        do_error('Chain root file name not given')
    rootname = os.path.basename(in_root)

    if args.ignore_rows is not None:
        ignorerows = args.ignore_rows
    else:
        ignorerows = ini.float('ignore_rows', 0.0)

    samples_are_chains = ini.bool('samples_are_chains', True)

    paramnames = ini.string('parameter_names', '')

    # Create instance of MCSamples
    mc = MCSamples(in_root,
                   ini=ini,
                   files_are_chains=samples_are_chains,
                   paramNamesFile=paramnames)

    if ini.bool('adjust_priors', False) or ini.bool('map_params', False):
        do_error(
            'To adjust priors or define new parameters, use a separate python script; '
            'see the python getdist docs for examples')

    plot_ext = ini.string('plot_ext', 'py')
    finish_run_command = ini.string('finish_run_command', '')

    no_plots = ini.bool('no_plots', no_plots)
    plots_only = ini.bool('plots_only', False)
    no_tests = plots_only or ini.bool('no_tests', False)

    thin_factor = ini.int('thin_factor', 0)
    thin_cool = ini.float('thin_cool', 1.0)

    cool = ini.float('cool', 1.0)

    chain_exclude = ini.int_list('exclude_chain')

    out_dir = ini.string('out_dir', './')
    if out_dir:
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)
        doprint('producing files in directory ', out_dir)
    mc.out_dir = out_dir

    out_root = ini.string('out_root', '')
    if out_root:
        rootname = out_root
        doprint('producing files with root ', out_root)
    mc.rootname = rootname

    rootdirname = os.path.join(out_dir, rootname)
    mc.rootdirname = rootdirname

    if 'do_minimal_1d_intervals' in ini.params:
        do_error(
            'do_minimal_1d_intervals no longer used; set credible_interval_threshold instead'
        )

    line = ini.string('PCA_params', '')
    if line.lower() == 'all':
        PCA_params = mc.paramNames.list()
    else:
        PCA_params = line.split()
    PCA_num = ini.int('PCA_num', len(PCA_params))
    if PCA_num != 0:
        if PCA_num < 2:
            do_error('Can only do PCA for 2 or more parameters')
        PCA_func = ini.string('PCA_func', '')
        # Characters representing functional mapping
        if PCA_func == '':
            PCA_func = ['N'] * PCA_num  # No mapping
        PCA_NormParam = ini.string('PCA_normparam', '') or None

    # ==============================================================================

    first_chain = ini.int('first_chain', 0)
    last_chain = ini.int('chain_num', -1)
    # -1 means keep reading until one not found

    # Chain files
    for separator in ['_', '.']:
        chain_files = chains.chainFiles(in_root,
                                        first_chain=first_chain,
                                        last_chain=last_chain,
                                        chain_exclude=chain_exclude,
                                        separator=separator)
        if chain_files:
            break

    mc.loadChains(in_root, chain_files)

    mc.removeBurnFraction(ignorerows)
    if chains.print_load_details:
        if ignorerows:
            doprint('Removed %s as burn in' % ignorerows)
        else:
            doprint('Removed no burn in')

    mc.deleteFixedParams()
    mc.makeSingle()

    def filterParList(namestring, num=None):
        if not namestring.strip():
            _pars = mc.paramNames.list()
        else:
            _pars = []
            for name in namestring.split():
                if '?' in name or '*' in name:
                    _pars += mc.paramNames.getMatches(name, strings=True)
                elif mc.paramNames.parWithName(name):
                    _pars.append(name)
        if num is not None and len(_pars) != num:
            doprint(
                '%iD plot has missing parameter or wrong number of parameters: %s'
                % (num, _pars))
            _pars = None
        return _pars

    if cool != 1:
        doprint('Cooling chains by ', cool)
        mc.cool(cool)

    mc.updateBaseStatistics()

    if not no_tests:
        mc.getConvergeTests(mc.converge_test_limit,
                            writeDataToFile=True,
                            feedback=True)

    mc.writeCovMatrix()
    mc.writeCorrelationMatrix()

    # Output thinned data if requested
    # Must do this with unsorted output
    if thin_factor > 1:
        thin_ix = mc.thin_indices(thin_factor)
        filename = rootdirname + '_thin.txt'
        mc.writeThinData(filename, thin_ix, thin_cool)

    doprint(mc.getNumSampleSummaryText().strip())
    if mc.likeStats:
        doprint(mc.likeStats.likeSummary().strip())

    if PCA_num > 0 and not plots_only:
        mc.PCA(PCA_params, PCA_func, PCA_NormParam, writeDataToFile=True)

    # Do 1D bins
    mc._setDensitiesandMarge1D()

    if not no_plots:
        # Output files for 1D plots

        plotparams = []
        line = ini.string('plot_params', '')
        if line not in ['', '0']:
            plotparams = filterParList(line)

        line = ini.string('plot_2D_param', '').strip()
        plot_2D_param = None
        if line and line != '0':
            plot_2D_param = line

        cust2DPlots = []
        if not plot_2D_param:
            # Use custom array of specific plots
            num_cust2D_plots = ini.int('plot_2D_num', 0)
            for i in range(1, num_cust2D_plots + 1):
                line = ini.string('plot' + str(i))
                pars = filterParList(line, 2)
                if pars is not None:
                    cust2DPlots.append(pars)
                else:
                    num_cust2D_plots -= 1

        triangle_params = []
        triangle_plot = ini.bool('triangle_plot', False)
        if triangle_plot:
            line = ini.string('triangle_params', '')
            triangle_params = filterParList(line)
            triangle_num = len(triangle_params)
            triangle_plot = triangle_num > 1

        num_3D_plots = ini.int('num_3D_plots', 0)
        plot_3D = []
        for ix in range(1, num_3D_plots + 1):
            line = ini.string('3D_plot' + str(ix))
            pars = filterParList(line, 3)
            if pars is not None:
                plot_3D.append(pars)
            else:
                num_3D_plots -= 1

        make_plots = ini.bool('make_plots', False) or args.make_plots

        filename = rootdirname + '.' + plot_ext
        mc._writeScriptPlots1D(filename, plotparams)
        if make_plots:
            runScript(filename)

        # Do 2D bins
        if plot_2D_param == 'corr':
            # In this case output the most correlated variable combinations
            doprint('...doing 2D plots for most correlated variables')
            cust2DPlots = mc.getCorrelatedVariable2DPlots()
            plot_2D_param = None
        elif plot_2D_param:
            mc.paramNames.parWithName(plot_2D_param, error=True)  # just check

        if cust2DPlots or plot_2D_param:
            doprint('...producing 2D plots')
            filename = rootdirname + '_2D.' + plot_ext
            mc._writeScriptPlots2D(filename, plot_2D_param, cust2DPlots)
            if make_plots:
                runScript(filename)

        if triangle_plot:
            # Add the off-diagonal 2D plots
            doprint('...producing triangle plot')
            filename = rootdirname + '_tri.' + plot_ext
            mc._writeScriptPlotsTri(filename, triangle_params)
            if make_plots:
                runScript(filename)

        # Do 3D plots (i.e. 2D scatter plots with coloured points)
        if num_3D_plots:
            doprint('...producing ', num_3D_plots, '2D colored scatter plots')
            filename = rootdirname + '_3D.' + plot_ext
            mc._writeScriptPlots3D(filename, plot_3D)
            if make_plots:
                runScript(filename)

    if not plots_only:
        # Write out stats marginalized
        mc.getMargeStats().saveAsText(rootdirname + '.margestats')

        # Limits from global likelihood
        if mc.loglikes is not None:
            mc.getLikeStats().saveAsText(rootdirname + '.likestats')

    # System command
    if finish_run_command:
        finish_run_command = finish_run_command.replace('%ROOTNAME%', rootname)
        os.system(finish_run_command)

    return "\n".join(result)
Example No. 22
import numpy as np

from getdist import IniFile

# Noise var is N_l in muK^2 for white noise
# note  NoiseVar = (muKArcmin * np.pi / 180 / 60.) ** 2

fwhm_arcmin = 5.
# NoiseVar = 2e-4
NoiseVar = 4e-5
# Pol noise var = ENoiseFac * NoiseVar
# 2 normally, but for Planck only half detectors are polarized
ENoiseFac = 4
lmin = 2
lmax = 2500
fsky = 0.57


# os.path.dirname(sys.path[0])+'/data/'
ini = IniFile()
dataset = ini.params

# change this if you don't want to use all pol
dataset['fields_use'] = 'T E'


# #Now produce the Planck_like files
fwhm = fwhm_arcmin / 60
xlc = 180 * np.sqrt(8.*np.log(2.)) / np.pi
sigma2 = (fwhm / xlc) ** 2

outPath = ''
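# 'root' (the chain root name) is assumed to be defined earlier in the original script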
outRoot = root + '_exactsim'
NoiseOut = []
Example No. 23
    def testGetDist(self):

        def callGetDist(args):
            if os.getenv('TRAVIS', None):
                return str(subprocess.check_output(['GetDist.py'] + args, env={'PATH': os.getenv('PATH')}))
            else:
                return str(subprocess.check_output(
                    ['python', os.path.join(os.path.dirname(__file__), '..' + os.sep, 'GetDist.py')] + args))

        os.chdir(self.tempdir)
        res = callGetDist([self.root])
        # Note this can fail if your local analysis defaults change the default ignore_rows
        self.assertTrue('-Ln(mean like)  = 2.30' in res)
        fname = 'testchain_pars.ini'
        callGetDist(['--make_param_file', fname])
        ini = IniFile(fname)
        ini.params['no_plots'] = False
        ini.params['plot_2D_num'] = 1
        ini.params['plot1'] = 'x y'
        ini.params['num_3D_plots'] = 1
        ini.params['3D_plot1'] = 'x y x'
        ini.params['plot_data_dir'] = ''
        ini.params['triangle_params'] = '*[xy]*'

        ini.saveFile(fname)
        res = callGetDist([fname, self.root])
        self.assertTrue('-Ln(mean like)  = 2.30' in res)
        self.assertFalse(os.path.isfile(os.path.join(self.tempdir, 'plot_data', 'testchain_2D_x_y')))

        def checkRun():
            for f in ['.py', '_2D.py', '_3D.py', '_tri.py']:
                pyname = self.root + f
                self.assertTrue(os.path.isfile(pyname))
                subprocess.check_output(['python', pyname])
                pdf = self.root + f.replace('py', 'pdf')
                self.assertTrue(os.path.isfile(pdf))
                os.remove(pdf)
                os.remove(pyname)

        checkRun()

        ini.params['plot_data_dir'] = 'plot_data/'
        ini.saveFile(fname)
        callGetDist([fname, self.root])
        self.assertTrue(os.path.isfile(os.path.join(self.tempdir, 'plot_data', 'testchain_2D_x_y')))
        checkRun()
        shutil.rmtree(os.path.join(self.tempdir, 'plot_data'))
Example No. 24
    def __init__(self, dataset, dataset_params=None, silent=False):
        if not silent:
            print('loading: %s' % dataset)
        self.dataset_file = dataset  # used below to name the inverse-covariance cache
        ini = IniFile(dataset)
        ini.params.update(dataset_params or {})
        spectra = np.loadtxt(ini.relativeFileName('cl_hat_file'))
        covmat_cl = ini.split('covmat_cl')
        use_cl = ini.split('use_cl', covmat_cl)
        if ini.hasKey('use_range'):
            used_ell = ini.params['use_range']
            if isinstance(used_ell, dict):
                print('Using range %s' % used_ell)
                for key, value in used_ell.items():
                    used_ell[key] = range_to_ells(value)
            else:
                if not silent: print('Using range: %s' % used_ell)
                used_ell = range_to_ells(used_ell)
        else:
            used_ell = None
        data_vector = []
        nX = 0
        used_indices = []
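        # each non-empty line of data_ranges gives: <spectrum name> <lmin> <lmax>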
        with open(ini.relativeFileName('data_ranges'), "r") as f:
            lines = f.readlines()
            while not lines[-1].strip():
                lines = lines[:-1]
            self.Nspec = len(lines)
            lmin = np.zeros(self.Nspec, dtype=int)
            lmax = np.zeros(self.Nspec, dtype=int)
            self.cl_names = []
            self.ell_ranges = np.empty(self.Nspec, dtype=object)
            self.used_sizes = np.zeros(self.Nspec, dtype=int)
            for i, line in enumerate(lines):
                items = line.split()
                tp = items[0]
                self.cl_names.append(tp)
                lmin[i], lmax[i] = [int(x) for x in items[1:]]
                if lmax[i] and lmax[i] >= lmin[i]:
                    n = lmax[i] - lmin[i] + 1
                    data_vector.append(spectra[lmin[i]:lmax[i] + 1, i])
                    if tp in use_cl:
                        if used_ell is not None and (not isinstance(
                                used_ell, dict) or tp in used_ell):
                            if isinstance(used_ell, dict):
                                ells = used_ell[tp]
                            else:
                                ells = used_ell
                            self.ell_ranges[i] = np.array(
                                [L for L in range(lmin[i], lmax[i] + 1)
                                 if L in ells], dtype=int)
                            used_indices.append(self.ell_ranges[i] +
                                                (nX - lmin[i]))
                        else:
                            used_indices.append(range(nX, nX + n))
                            self.ell_ranges[i] = range(lmin[i], lmax[i] + 1)
                        self.used_sizes[i] = len(self.ell_ranges[i])
                    else:
                        lmax[i] = -1
                    nX += n

        self.cl_used = np.array([name in use_cl for name in self.cl_names],
                                dtype=bool)
        covfile = ini.relativeFileName('covmat_fiducial')
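        # fiducial covariance is stored as raw binary: float64 if the file
        # name contains '64.bin', float32 otherwise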
        with open(covfile, "rb") as f:
            cov = np.fromfile(f,
                              dtype=[np.float32,
                                     np.float64]['64.bin' in covfile])
        assert (nX**2 == cov.shape[0])
        used_indices = np.concatenate(used_indices)
        self.data_vector = np.concatenate(data_vector)[used_indices]
        self.cov = cov.reshape(nX, nX)[np.ix_(used_indices,
                                              used_indices)].astype(np.float64)
        if not silent:
            for name, mn, mx in zip(self.cl_names, lmin, lmax):
                if name in use_cl:
                    print(name, mn, mx)
            print('Number of data points: %s' % self.cov.shape[0])
        self.lmax = lmax
        self.lmin = lmin
        max_l = np.max(self.lmax)
        self.ls = np.arange(max_l + 1)
        self.llp1 = self.ls * (self.ls + 1)

        if np.any(self.cl_used[:4]):
            pivot = 3000
            self.sz_143 = self.read_normalized(
                ini.relativeFileName('sz143file'), pivot)[:max_l + 1]
            self.ksz = self.read_normalized(ini.relativeFileName('kszfile'),
                                            pivot)[:max_l + 1]
            self.tszxcib = self.read_normalized(
                ini.relativeFileName('tszxcibfile'), pivot)[:max_l + 1]

            self.cib_217 = self.read_normalized(
                ini.relativeFileName('cib217file'), pivot)[:max_l + 1]

            self.dust = np.vstack(
                (self.read_normalized(
                    ini.relativeFileName('dust100file'))[:max_l + 1],
                 self.read_normalized(
                     ini.relativeFileName('dust143file'))[:max_l + 1],
                 self.read_normalized(
                     ini.relativeFileName('dust217file'))[:max_l + 1],
                 self.read_normalized(
                     ini.relativeFileName('dust143x217file'))[:max_l + 1]))
            self.lnrat = self.ls * 0
            l_min = np.min(lmin[self.cl_used])
            self.lnrat[l_min:] = np.log(self.ls[l_min:] / np.float64(pivot))

        import hashlib
        cache_file = self.dataset_file.replace(
            '.dataset', '_covinv_%s.npy' %
            hashlib.md5(str(ini.params).encode('utf8')).hexdigest())
        if use_cache and os.path.exists(cache_file):
            self.covinv = np.load(cache_file).astype(np.float64)
        else:
            self.covinv = np.linalg.inv(self.cov)
            if use_cache: np.save(cache_file, self.covinv.astype(np.float32))
Example No. 25
def makeGrid(batchPath,
             settingName=None,
             settings=None,
             readOnly=False,
             interactive=False):
    batchPath = os.path.abspath(batchPath) + os.sep

    # 0: chains, 1: importance sampling, 2: best-fit, 3: best-fit and Hessian
    cosmomcAction = 0

    if not settings:
        if not settingName:
            if not pathIsGrid(batchPath):
                raise Exception(
                    'Need to give name of setting file if batchPath/config does not exist'
                )
            readOnly = True
            sys.path.insert(0, batchPath + 'config')
            sys.modules['batchJob'] = batchjob  # old name
            settings = __import__(
                IniFile(batchPath +
                        'config/config.ini').params['setting_file'].replace(
                            '.py', ''))
        else:
            settings = __import__(settingName, fromlist=['dummy'])

    batch = batchjob.batchJob(batchPath, settings.ini_dir)

    if hasattr(settings, 'skip'): batch.skip = settings.skip
    batch.makeItems(settings, messages=not readOnly)
    if readOnly:
        for jobItem in [b for b in batch.jobItems]:
            if not jobItem.chainExists():
                batch.jobItems.remove(jobItem)
        batch.save()
        print('OK, configured grid with %u existing chains' %
              (len(batch.jobItems)))
        return batch
    else:
        batch.makeDirectories(settings.__file__)
        batch.save()

    # priors and widths for parameters which are varied
    start_at_bestfit = getattr(settings, 'start_at_bestfit', False)
    params = getattr(settings, 'params', default_params)
    param_extra = getattr(settings, 'param_extra_opts',
                          default_param_extra_opts)

    for jobItem in batch.items(wantSubItems=False):

        jobItem.makeChainPath()
        ini = IniFile()

        for param in jobItem.param_set:
            ini.params['param[' + param + ']'] = params[param]
            if param_extra is not None and param in param_extra:
                ini.params.update(param_extra[param])

        if hasattr(settings, 'extra_opts'):
            ini.params.update(settings.extra_opts)

        ini.params['file_root'] = jobItem.chainRoot

        cov_dir_name = getattr(settings, 'cov_dir', 'planck_covmats')
        covdir = os.path.join(batch.basePath, cov_dir_name)
        covmat = os.path.join(covdir, jobItem.name + '.covmat')
        if not os.path.exists(covmat):
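            # no covmat named after the job itself: try a name normalised via
            # covNameMappings, then dataset- and settings-level covmats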
            covNameMappings = getattr(settings, 'covNameMappings', None)
            mapped_name_norm = jobItem.makeNormedName(covNameMappings)[0]
            covmat_normed = os.path.join(covdir, mapped_name_norm + '.covmat')
            covmat = covmat_normed
            if not os.path.exists(covmat) and hasattr(jobItem.data_set,
                                                      'covmat'):
                covmat = batch.basePath + jobItem.data_set.covmat
            if not os.path.exists(covmat) and hasattr(settings, 'covmat'):
                covmat = batch.basePath + settings.covmat
        else:
            covNameMappings = None
        if os.path.exists(covmat):
            ini.params['propose_matrix'] = covmat
            if getattr(settings, 'newCovmats', True):
                ini.params['MPI_Max_R_ProposeUpdate'] = 20
        else:
            hasCov = False
            ini.params['MPI_Max_R_ProposeUpdate'] = 20
            covmat_try = []
            if 'covRenamer' in dir(settings):
                covmat_try += settings.covRenamer(jobItem.name)
                covmat_try += settings.covRenamer(mapped_name_norm)
            if hasattr(settings, 'covrenames'):
                for aname in [jobItem.name, mapped_name_norm]:
                    covmat_try += [
                        aname.replace(old, new, 1)
                        for old, new in settings.covrenames if old in aname
                    ]
                    for new1, old1 in settings.covrenames:
                        if old1 in aname:
                            name = aname.replace(old1, new1, 1)
                            covmat_try += [
                                name.replace(old, new, 1)
                                for old, new in settings.covrenames
                                if old in name
                            ]
            if 'covWithoutNameOrder' in dir(settings):
                if covNameMappings:
                    removes = copy.deepcopy(covNameMappings)
                else:
                    removes = dict()
                for name in settings.covWithoutNameOrder:
                    if name in jobItem.data_set.names:
                        removes[name] = ''
                        covmat_try += [jobItem.makeNormedName(removes)[0]]
            covdir2 = os.path.join(
                batch.basePath,
                getattr(settings, 'cov_dir_fallback', cov_dir_name))
            for name in covmat_try:
                covmat = os.path.join(batch.basePath, covdir2,
                                      name + '.covmat')
                if os.path.exists(covmat):
                    ini.params['propose_matrix'] = covmat
                    print('covmat ' + jobItem.name + ' -> ' + name)
                    hasCov = True
                    break
            if not hasCov:
                print('WARNING: no matching specific covmat for ' +
                      jobItem.name)

        ini.params['start_at_bestfit'] = start_at_bestfit
        updateIniParams(ini, jobItem.data_set.params, batch.commonPath)
        for deffile in settings.defaults:
            ini.defaults.append(batch.commonPath + deffile)
        if hasattr(settings, 'override_defaults'):
            ini.defaults = [
                batch.commonPath + deffile
                for deffile in settings.override_defaults
            ] + ini.defaults

        ini.params['action'] = cosmomcAction
        ini.saveFile(jobItem.iniFile())
        if not start_at_bestfit:
            setMinimize(jobItem, ini)
            variant = '_minimize'
            ini.saveFile(jobItem.iniFile(variant))

        # add ini files for importance sampling runs
        for imp in jobItem.importanceJobs():
            if not getattr(jobItem, 'importanceFilter', None): continue
            if batch.hasName(imp.name.replace('_post', '')):
                raise Exception(
                    'importance sampling something you already have?')
            for minimize in (False, True):
                if minimize and not getattr(imp, 'want_minimize', True):
                    continue
                ini = IniFile()
                updateIniParams(ini, imp.importanceSettings, batch.commonPath)
                if cosmomcAction == 0 and not minimize:
                    for deffile in settings.importanceDefaults:
                        ini.defaults.append(batch.commonPath + deffile)
                    ini.params['redo_outroot'] = imp.chainRoot
                    ini.params['action'] = 1
                else:
                    ini.params['file_root'] = imp.chainRoot
                if minimize:
                    setMinimize(jobItem, ini)
                    variant = '_minimize'
                else:
                    variant = ''
                ini.defaults.append(jobItem.iniFile())
                ini.saveFile(imp.iniFile(variant))
                if cosmomcAction != 0: break

    if not interactive: return batch
    print('Done... to run do: python python/runbatch.py ' + batchPath)
    if not start_at_bestfit:
        print('....... for best fits: python python/runbatch.py ' + batchPath +
              ' --minimize')
    print('')
    print('for importance sampled: python python/runbatch.py ' + batchPath +
          ' --importance')
    print('for best-fit for importance sampled: python python/runbatch.py ' +
          batchPath + ' --importance_minimize')
Example No. 26
from __future__ import print_function
import os
import sys
from getdist import types, IniFile

if len(sys.argv) < 3:
    print('Usage: python/bestFitCAMB.py chain_root iniName')
    sys.exit()

root = os.path.abspath(sys.argv[1])

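# map CAMB .ini parameter keys to the corresponding CosmoMC best-fit parameter names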
pars = {'ombh2': 'omegabh2', 'omch2': 'omegach2', 'omnuh2': 'omeganuh2',
        'hubble': 'H0', 'w': 'w', 'helium_fraction': 'yheused',
        'scalar_amp(1)': 'A', 'scalar_spectral_index(1)': 'ns',
        'scalar_nrun(1)': 'nrun', 'initial_ratio(1)': 'r',
        're_optical_depth': 'tau', 're_delta_redshift': 'deltazrei',
        'massless_neutrinos': 'nnu'}

ini = IniFile()

ini.params['re_use_optical_depth'] = True
ini.params['temp_cmb'] = 2.7255
ini.params['CMB_outputscale'] = 2.7255e6 ** 2.
ini.defaults.append('params.ini')

bf = types.BestFit(root + '.minimum', setParamNameFile=root + '.paramnames', want_fixed=True)

for camb, cosmomc in list(pars.items()):
    par = bf.parWithName(cosmomc)
    if par is not None: ini.params[camb] = par.best_fit

ini.params['scalar_amp(1)'] = float(ini.params['scalar_amp(1)']) / 1e9

nmassive = 1
Example No. 28
def makeGrid(batchPath, settingName=None, settings=None, readOnly=False, interactive=False):
    batchPath = os.path.abspath(batchPath) + os.sep

    # 0: chains, 1: importance sampling, 2: best-fit, 3: best-fit and Hessian
    cosmomcAction = 0

    if not settings:
        if not settingName:
            if not pathIsGrid(batchPath):
                raise Exception('Need to give name of setting file if batchPath/config does not exist')
            readOnly = True
            sys.path.insert(0, batchPath + 'config')
            sys.modules['batchJob'] = batchjob  # old name
            settings = __import__(IniFile(batchPath + 'config/config.ini').params['setting_file'].replace('.py', ''))
        else:
            settings = __import__(settingName, fromlist=['dummy'])

    batch = batchjob.batchJob(batchPath, settings.ini_dir)

    if hasattr(settings, 'skip'): batch.skip = settings.skip
    batch.makeItems(settings, messages=not readOnly)
    if readOnly:
        for jobItem in [b for b in batch.jobItems]:
            if not jobItem.chainExists():
                batch.jobItems.remove(jobItem)
        batch.save()
        print('OK, configured grid with %u existing chains' % (len(batch.jobItems)))
        return batch
    else:
        batch.makeDirectories(settings.__file__)
        batch.save()

    # priors and widths for parameters which are varied
    start_at_bestfit = getattr(settings, 'start_at_bestfit', False)
    params = getattr(settings, 'params', default_params)
    param_extra = getattr(settings, 'param_extra_opts', default_param_extra_opts)

    for jobItem in batch.items(wantSubItems=False):

        jobItem.makeChainPath()
        ini = IniFile()

        for param in jobItem.param_set:
            ini.params['param[' + param + ']'] = params[param]
            if param_extra is not None and param in param_extra:
                ini.params.update(param_extra[param])

        if hasattr(settings, 'extra_opts'):
            ini.params.update(settings.extra_opts)

        ini.params['file_root'] = jobItem.chainRoot

        cov_dir_name = getattr(settings, 'cov_dir', 'planck_covmats')
        covdir = os.path.join(batch.basePath, cov_dir_name)
        covmat = os.path.join(covdir, jobItem.name + '.covmat')
        if not os.path.exists(covmat):
            covNameMappings = getattr(settings, 'covNameMappings', None)
            mapped_name_norm = jobItem.makeNormedName(covNameMappings)[0]
            covmat_normed = os.path.join(covdir, mapped_name_norm + '.covmat')
            covmat = covmat_normed
            if not os.path.exists(covmat) and hasattr(jobItem.data_set,
                                                      'covmat'): covmat = batch.basePath + jobItem.data_set.covmat
            if not os.path.exists(covmat) and hasattr(settings, 'covmat'): covmat = batch.basePath + settings.covmat
        else:
            covNameMappings = None
        if os.path.exists(covmat):
            ini.params['propose_matrix'] = covmat
            if getattr(settings, 'newCovmats', True): ini.params['MPI_Max_R_ProposeUpdate'] = 20
        else:
            hasCov = False
            ini.params['MPI_Max_R_ProposeUpdate'] = 20
            covmat_try = []
            if 'covRenamer' in dir(settings):
                covmat_try += settings.covRenamer(jobItem.name)
                covmat_try += settings.covRenamer(mapped_name_norm)
            if hasattr(settings, 'covrenames'):
                for aname in [jobItem.name, mapped_name_norm]:
                    covmat_try += [aname.replace(old, new, 1) for old, new in settings.covrenames if old in aname]
                    for new1, old1 in settings.covrenames:
                        if old1 in aname:
                            name = aname.replace(old1, new1, 1)
                            covmat_try += [name.replace(old, new, 1) for old, new in settings.covrenames if old in name]
            if 'covWithoutNameOrder' in dir(settings):
                if covNameMappings:
                    removes = copy.deepcopy(covNameMappings)
                else:
                    removes = dict()
                for name in settings.covWithoutNameOrder:
                    if name in jobItem.data_set.names:
                        removes[name] = ''
                        covmat_try += [jobItem.makeNormedName(removes)[0]]
            covdir2 = os.path.join(batch.basePath, getattr(settings, 'cov_dir_fallback', cov_dir_name))
            for name in covmat_try:
                covmat = os.path.join(batch.basePath, covdir2, name + '.covmat')
                if os.path.exists(covmat):
                    ini.params['propose_matrix'] = covmat
                    print('covmat ' + jobItem.name + ' -> ' + name)
                    hasCov = True
                    break
            if not hasCov: print('WARNING: no matching specific covmat for ' + jobItem.name)

        ini.params['start_at_bestfit'] = start_at_bestfit
        updateIniParams(ini, jobItem.data_set.params, batch.commonPath)
        for deffile in settings.defaults:
            ini.defaults.append(batch.commonPath + deffile)
        if hasattr(settings, 'override_defaults'):
            ini.defaults = [batch.commonPath + deffile for deffile in settings.override_defaults] + ini.defaults

        ini.params['action'] = cosmomcAction
        ini.saveFile(jobItem.iniFile())
        if not start_at_bestfit:
            setMinimize(jobItem, ini)
            variant = '_minimize'
            ini.saveFile(jobItem.iniFile(variant))

        # add ini files for importance sampling runs
        for imp in jobItem.importanceJobs():
            if batch.hasName(imp.name.replace('_post', '')):
                raise Exception('importance sampling something you already have?')
            for minimize in (False, True):
                if minimize and not getattr(imp, 'want_minimize', True): continue
                ini = IniFile()
                updateIniParams(ini, imp.importanceSettings, batch.commonPath)
                if cosmomcAction == 0 and not minimize:
                    for deffile in settings.importanceDefaults:
                        ini.defaults.append(batch.commonPath + deffile)
                    ini.params['redo_outroot'] = imp.chainRoot
                    ini.params['action'] = 1
                else:
                    ini.params['file_root'] = imp.chainRoot
                if minimize:
                    setMinimize(jobItem, ini)
                    variant = '_minimize'
                else:
                    variant = ''
                ini.defaults.append(jobItem.iniFile())
                ini.saveFile(imp.iniFile(variant))
                if cosmomcAction != 0: break

    if not interactive: return batch
    print('Done... to run do: python python/runbatch.py ' + batchPath)
    if not start_at_bestfit:
        print('....... for best fits: python python/runbatch.py ' + batchPath + ' --minimize')
    print('')
    print('for importance sampled: python python/runbatch.py ' + batchPath + ' --importance')
    print('for best-fit for importance sampled: python python/runbatch.py ' + batchPath + ' --importance_minimize')
Example No. 29
def main(args):
    no_plots = False
    chain_root = args.chain_root
    if args.ini_file is None and chain_root is None:
        doError('Must give either a .ini file of parameters or a chain file root name. Run "GetDist.py -h" for help.')
    if '.ini' not in args.ini_file and chain_root is None:
        # use default settings acting on chain_root, no plots
        chain_root = args.ini_file
        args.ini_file = getdist.default_getdist_settings
        no_plots = True
    if not os.path.isfile(args.ini_file):
        doError('Parameter file does not exist: ' + args.ini_file)
    if chain_root and chain_root.endswith('.txt'):
        chain_root = chain_root[:-4]

    # Input parameters
    ini = IniFile(args.ini_file)

    # File root
    if chain_root is not None:
        in_root = chain_root
    else:
        in_root = ini.params['file_root']
    if not in_root:
        doError('Chain root file name not given')
    rootname = os.path.basename(in_root)

    if args.ignore_rows is not None:
        ignorerows = args.ignore_rows
    else:
        ignorerows = ini.float('ignore_rows', 0.0)

    samples_are_chains = ini.bool('samples_are_chains', True)
    
    paramnames = ini.string('parameter_names', '')

    # Create instance of MCSamples
    mc = MCSamples(in_root, files_are_chains=samples_are_chains, paramNamesFile=paramnames)

    mc.initParameters(ini)

    if ini.bool('adjust_priors', False) or ini.bool('map_params', False):
        doError(
            'To adjust priors or define new parameters, use a separate python script; see the python getdist docs for examples')

    plot_ext = ini.string('plot_ext', 'py')
    finish_run_command = ini.string('finish_run_command', '')

    no_plots = ini.bool('no_plots', no_plots)
    plots_only = ini.bool('plots_only', False)
    no_tests = plots_only or ini.bool('no_tests', False)

    thin_factor = ini.int('thin_factor', 0)
    thin_cool = ini.float('thin_cool', 1.0)

    make_single_samples = ini.bool('make_single_samples', False)
    single_thin = ini.int('single_thin', 1)
    cool = ini.float('cool', 1.0)

    chain_exclude = ini.int_list('exclude_chain')

    shade_meanlikes = ini.bool('shade_meanlikes', False)
    plot_meanlikes = ini.bool('plot_meanlikes', False)

    dumpNDbins = ini.bool('dump_ND_bins', False)

    out_dir = ini.string('out_dir', './')
    if out_dir:
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)
        print('producing files in directory ', out_dir)
    mc.out_dir = out_dir

    out_root = ini.string('out_root', '')
    if out_root:
        rootname = out_root
        print('producing files with root ', out_root)
    mc.rootname = rootname

    rootdirname = os.path.join(out_dir, rootname)
    mc.rootdirname = rootdirname

    if 'do_minimal_1d_intervals' in ini.params:
        doError('do_minimal_1d_intervals no longer used; set credible_interval_threshold instead')

    line = ini.string('PCA_params', '')
    if line.lower() == 'all':
        PCA_params = mc.paramNames.list()
    else:
        PCA_params = line.split()
    PCA_num = ini.int('PCA_num', len(PCA_params))
    if PCA_num != 0:
        if PCA_num < 2:
            doError('Can only do PCA for 2 or more parameters')
        PCA_func = ini.string('PCA_func', '')
        # Characters representing functional mapping
        if PCA_func == '':
            PCA_func = ['N'] * PCA_num  # No mapping
        PCA_NormParam = ini.string('PCA_normparam', '') or None

    make_scatter_samples = ini.bool('make_scatter_samples', False)

    # ==============================================================================

    first_chain = ini.int('first_chain', 0)
    last_chain = ini.int('chain_num', -1)
    # -1 means keep reading until one not found

    # Chain files
    chain_files = chains.chainFiles(in_root, first_chain=first_chain, last_chain=last_chain,
                                    chain_exclude=chain_exclude)

    mc.loadChains(in_root, chain_files)

    mc.removeBurnFraction(ignorerows)
    mc.deleteFixedParams()
    mc.makeSingle()

    def filterParList(namestring, num=None):
        if not namestring.strip():
            pars = mc.paramNames.list()
        else:
            pars = []
            for name in namestring.split():
                if '?' in name or '*' in name:
                    pars += mc.paramNames.getMatches(name, strings=True)
                elif mc.paramNames.parWithName(name):
                    pars.append(name)
        if num is not None and len(pars) != num:
            print('%iD plot has missing parameter or wrong number of parameters: %s' % (num, pars))
            pars = None
        return pars


    if cool != 1:
        print('Cooling chains by ', cool)
        mc.cool(cool)

    mc.updateBaseStatistics()

    if not no_tests:
        mc.getConvergeTests(mc.converge_test_limit, writeDataToFile=True, feedback=True)

    mc.writeCovMatrix()
    mc.writeCorrelationMatrix()

    # Output thinned data if requested
    # Must do this with unsorted output
    if thin_factor != 0:
        thin_ix = mc.thin_indices(thin_factor)
        filename = rootdirname + '_thin.txt'
        mc.writeThinData(filename, thin_ix, thin_cool)

    print(mc.getNumSampleSummaryText().strip())
    if mc.likeStats: print(mc.likeStats.likeSummary().strip())

    if PCA_num > 0 and not plots_only:
        mc.PCA(PCA_params, PCA_func, PCA_NormParam, writeDataToFile=True)

    if not no_plots or dumpNDbins:
        # set plot_data_dir before we generate the 1D densities below
        plot_data_dir = ini.string('plot_data_dir', default='', allowEmpty=True)
        if plot_data_dir and not os.path.isdir(plot_data_dir):
            os.mkdir(plot_data_dir)
    else:
        plot_data_dir = None
    mc.plot_data_dir = plot_data_dir

    # Do 1D bins
    mc._setDensitiesandMarge1D(writeDataToFile=not no_plots and plot_data_dir, meanlikes=plot_meanlikes)

    if not no_plots:
        # Output files for 1D plots
        print('Calculating plot data...')

        plotparams = []
        line = ini.string('plot_params', '')
        if line not in ['', '0']:
            plotparams = filterParList(line)

        line = ini.string('plot_2D_param', '').strip()
        plot_2D_param = None
        if line and line != '0':
            plot_2D_param = line

        cust2DPlots = []
        if not plot_2D_param:
            # Use custom array of specific plots
            num_cust2D_plots = ini.int('plot_2D_num', 0)
            for i in range(1, num_cust2D_plots + 1):
                line = ini.string('plot' + str(i))
                pars = filterParList(line, 2)
                if pars is not None:
                    cust2DPlots.append(pars)
                else:
                    num_cust2D_plots -= 1

                
        triangle_params = []
        triangle_plot = ini.bool('triangle_plot', False)
        if triangle_plot:
            line = ini.string('triangle_params', '')
            triangle_params = filterParList(line)
            triangle_num = len(triangle_params)
            triangle_plot = triangle_num > 1

        num_3D_plots = ini.int('num_3D_plots', 0)
        plot_3D = []
        for ix in range(1, num_3D_plots + 1):
            line = ini.string('3D_plot' + str(ix))
            pars = filterParList(line, 3)
            if pars is not None:
                plot_3D.append(pars)
            else:
                num_3D_plots -= 1
            
      
        # Produce file of weight-1 samples if requested
        if (num_3D_plots and not make_single_samples or make_scatter_samples) and not no_plots:
            make_single_samples = True
            single_thin = max(1, int(round(mc.norm / mc.max_mult)) // mc.max_scatter_points)

        if plot_data_dir:
            if make_single_samples:
                filename = os.path.join(plot_data_dir, rootname.strip() + '_single.txt')
                mc.makeSingleSamples(filename, single_thin)

            # Write paramNames file
            mc.getParamNames().saveAsText(os.path.join(plot_data_dir, rootname + '.paramnames'))
            mc.getBounds().saveToFile(os.path.join(plot_data_dir, rootname + '.bounds'))

        make_plots = ini.bool('make_plots', False)

        done2D = {}

        filename = rootdirname + '.' + plot_ext
        mc.writeScriptPlots1D(filename, plotparams)
        if make_plots: runScript(filename)

        # Do 2D bins
        if plot_2D_param == 'corr':
            # In this case output the most correlated variable combinations
            print('...doing 2D plots for most correlated variables')
            cust2DPlots = mc.getCorrelatedVariable2DPlots()
            plot_2D_param = None
        elif plot_2D_param:
            mc.paramNames.parWithName(plot_2D_param, error=True)  # just check

        if cust2DPlots or plot_2D_param:
            print('...producing 2D plots')
            filename = rootdirname + '_2D.' + plot_ext
            done2D = mc.writeScriptPlots2D(filename, plot_2D_param, cust2DPlots,
                                           writeDataToFile=plot_data_dir, shade_meanlikes=shade_meanlikes)
            if make_plots: runScript(filename)

        if triangle_plot:
            # Add the off-diagonal 2D plots
            print('...producing triangle plot')
            filename = rootdirname + '_tri.' + plot_ext
            mc.writeScriptPlotsTri(filename, triangle_params)
            for i, p2 in enumerate(triangle_params):
                for p1 in triangle_params[i + 1:]:
                    if not done2D.get((p1, p2)) and plot_data_dir:
                        mc.get2DDensityGridData(p1, p2, writeDataToFile=True, meanlikes=shade_meanlikes)
            if make_plots: runScript(filename)

        # Do 3D plots (i.e. 2D scatter plots with coloured points)
        if num_3D_plots:
            print('...producing ', num_3D_plots, '2D colored scatter plots')
            filename = rootdirname + '_3D.' + plot_ext
            mc.writeScriptPlots3D(filename, plot_3D)
            if make_plots: runScript(filename)

    if not plots_only:
        # Write out stats marginalized
        mc.getMargeStats().saveAsText(rootdirname + '.margestats')

        # Limits from global likelihood
        if mc.loglikes is not None: mc.getLikeStats().saveAsText(rootdirname + '.likestats')


    if dumpNDbins:
        num_bins_ND = ini.int('num_bins_ND', 10)
        line = ini.string('ND_params', '')

        if line not in ['', '0']:
            ND_params = filterParList(line)
            print(ND_params)

            ND_dim = len(ND_params)
            print(ND_dim)

            mc.getRawNDDensityGridData(ND_params, writeDataToFile=True,
                                       meanlikes=shade_meanlikes)

    # System command
    if finish_run_command:
        finish_run_command = finish_run_command.replace('%ROOTNAME%', rootname)
        # plot_data_dir may be None if plots were disabled
        finish_run_command = finish_run_command.replace('%PLOTDIR%', plot_data_dir or '')
        finish_run_command = finish_run_command.replace(
            '%PLOTROOT%', os.path.join(plot_data_dir or '', rootname))
        os.system(finish_run_command)
Example No. 30
def cosmomc_root_to_cobaya_info_dict(root: str, derived_to_input=()) -> InputDict:
    """
    Given the root name of existing cosmomc chain files, tries to construct a Cobaya
    input parameter dictionary with roughly equivalent settings. The output
    dictionary can be used for importance sampling from CosmoMC chains in simple cases
    using Cobaya's 'post'.

    Parameters in the optional derived_to_input list are converted from being derived
    parameters in CosmoMC to non-derived in Cobaya.

    This is by no means guaranteed to produce valid or equivalent results, use at your
    own risk with careful checking! Note that the parameter dictionary will not have
    settings for CAMB, samplers etc, which is OK for importance sampling but you would
    need to add them as necessary to reproduce results.

    Parameter chains in CosmoMC format are available for Planck
    from https://pla.esac.esa.int/pla/#home

    """
    names = ParamNames(root + '.paramnames')
    if os.path.exists(root + '.ranges'):
        ranges = ParamBounds(root + '.ranges')
    else:
        ranges = None
    d: ParamsDict = {}
    info: InputDict = {'params': d}
    for par, name in zip(names.names, names.list()):
        if name.startswith('chi2_') and not name.startswith('chi2__'):
            if name == 'chi2_prior':
                continue
            name = name.replace('chi2_', 'chi2__')
        if name.startswith('minuslogprior') or name == 'chi2':
            continue
        param_dict: ParamDict = {'latex': par.label}
        d[name] = param_dict
        if par.renames:
            param_dict['renames'] = par.renames
        if par.isDerived:
            if name not in derived_to_input:
                param_dict['derived'] = True
            else:
                par.isDerived = False
        if ranges and name in ranges.names:
            if par.isDerived:
                low_up = ranges.getLower(name), ranges.getUpper(name)
                if any(r is not None for r in low_up):
                    param_dict['min'], param_dict['max'] = low_up
            else:
                param_dict["prior"] = [ranges.getLower(name), ranges.getUpper(name)]
    if ranges:
        d.update(ranges.fixedValueDict())
    if names.numberOfName('As') == -1 and names.numberOfName('logA') != -1:
        d['As'] = {'latex': r'A_\mathrm{s}', 'value': 'lambda logA: 1e-10*np.exp(logA)'}
    if names.numberOfName('cosmomc_theta') == -1 and names.numberOfName('theta') != -1:
        d['cosmomc_theta'] = {'latex': r'\theta_{\rm MC}',
                              'value': 'lambda theta: theta/100'}

    # special case for CosmoMC (e.g. Planck) chains
    if os.path.exists(root + '.inputparams'):
        inputs = IniFile(root + '.inputparams')
        for key, value in inputs.params.items():
            if key.startswith('prior['):
                if 'prior' not in info:
                    info['prior'] = {}
                param = key[6:-1]
                if param in d:
                    mean, std = (float(v.strip()) for v in value.split())
                    if not names.parWithName(param).isDerived:
                        info['prior'][param + '_prior'] = \
                            "lambda %s: stats.norm.logpdf(%s, loc=%g, scale=%g)" % (
                                param, param, mean, std)

            if key.startswith('linear_combination['):
                param = key.replace('linear_combination[', '')[:-1]
                prior = inputs.params.get('prior[%s]' % param, None)
                if prior:
                    weights = inputs.params.get('linear_combination_weights[%s]' % param,
                                                None)
                    if not weights:
                        raise ValueError(
                            'linear_combination[%s] prior found but not weights' % param)
                    weights = [float(w.strip()) for w in weights.split()]
                    combination_params = value.split()  # avoid shadowing the 'inputs' IniFile
                    if 'prior' not in info:
                        info['prior'] = {}
                    mean, std = (float(v.strip()) for v in prior.split())
                    linear = "".join(
                        "%+g*%s" % (_w, _p)
                        for _w, _p in zip(weights, combination_params))
                    info['prior']['SZ'] = \
                        "lambda %s: stats.norm.logpdf(%s, loc=%g, scale=%g)" % (
                            ",".join(combination_params), linear, mean, std)
    if os.path.exists(root + '.likelihoods'):
        info_like: LikesDict = {}
        info['likelihood'] = info_like
        with open(root + '.likelihoods', 'r') as f:
            for line in f.readlines():
                if line.strip():
                    like = line.split()[2]
                    info_like[like] = None
    else:
        print('You need to add a likelihood block entry with "name: None" '
              'for each likelihood used by the input chain')

    info['output'] = root
    return info
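
A quick sketch of converting a chain and dumping the result (hypothetical root
name, e.g. a Planck chain from the PLA; assumes Cobaya is installed for the
YAML helper, and 'sigma8' is just an illustrative derived parameter):

from cobaya.yaml import yaml_dump_file

root = './chains/base_plikHM_TTTEEE_lowl_lowE'
info = cosmomc_root_to_cobaya_info_dict(root, derived_to_input=('sigma8',))
yaml_dump_file(root + '.input.yaml', info)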