Code example #1
File: CMB_CamSpec.py    Project: EFTCAMB/EFTCosmoMC
    def __init__(self, dataset, dataset_params={}, use_cache=True, silent=False):
        # use_cache and self.dataset_file are required by the covariance-cache code at the end
        if not silent: print('loading: %s' % dataset)
        self.dataset_file = dataset
        ini = IniFile(dataset)
        ini.params.update(dataset_params)
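        # cl_hat_file holds the data C_l's, one column per spectrum; covmat_cl lists the
        # spectra covered by the fiducial covariance, use_cl the subset actually fitted.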
        spectra = np.loadtxt(ini.relativeFileName('cl_hat_file'))
        covmat_cl = ini.split('covmat_cl')
        use_cl = ini.split('use_cl', covmat_cl)
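        # use_range optionally restricts the multipoles, either as a single range for all
        # spectra or as a dict mapping spectrum name -> range specification.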
        if ini.hasKey('use_range'):
            used_ell = ini.params['use_range']
            if isinstance(used_ell, dict):
                print('Using range %s' % used_ell)
                for key, value in used_ell.items():
                    used_ell[key] = range_to_ells(value)
            else:
                if not silent: print('Using range: %s' % used_ell)
                used_ell = range_to_ells(used_ell)
        else:
            used_ell = None
        data_vector = []
        nX = 0
        used_indices = []
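        # data_ranges gives, for each spectrum, its name and the lmin/lmax present in
        # cl_hat_file; build the data vector and the indices of entries actually used.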
        with open(ini.relativeFileName('data_ranges'), "r") as f:
            lines = f.readlines()
            while not lines[-1].strip():
                lines = lines[:-1]
            self.Nspec = len(lines)
            lmin = np.zeros(self.Nspec, dtype=int)
            lmax = np.zeros(self.Nspec, dtype=int)
            self.cl_names = []
            self.ell_ranges = np.empty(self.Nspec, dtype=object)
            self.used_sizes = np.zeros(self.Nspec, dtype=int)
            for i, line in enumerate(lines):
                items = line.split()
                tp = items[0]
                self.cl_names.append(tp)
                lmin[i], lmax[i] = [int(x) for x in items[1:]]
                if lmax[i] and lmax[i] >= lmin[i]:
                    n = lmax[i] - lmin[i] + 1
                    data_vector.append(spectra[lmin[i]:lmax[i] + 1, i])
                    if tp in use_cl:
                        if used_ell is not None and (not isinstance(
                                used_ell, dict) or tp in used_ell):
                            if isinstance(used_ell, dict):
                                ells = used_ell[tp]
                            else:
                                ells = used_ell
                            self.ell_ranges[i] = np.array(
                                [L for L in range(lmin[i], lmax[i] + 1) if L in ells],
                                dtype=int)
                            used_indices.append(self.ell_ranges[i] + (nX - lmin[i]))
                        else:
                            used_indices.append(range(nX, nX + n))
                            self.ell_ranges[i] = range(lmin[i], lmax[i] + 1)
                        self.used_sizes[i] = len(self.ell_ranges[i])
                    else:
                        lmax[i] = -1
                    nX += n

        self.cl_used = np.array([name in use_cl for name in self.cl_names],
                                dtype=bool)
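        # The fiducial covariance is stored as flat binary: float64 if the file name
        # contains '64.bin', otherwise float32.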
        covfile = ini.relativeFileName('covmat_fiducial')
        with open(covfile, "rb") as f:
            cov = np.fromfile(f,
                              dtype=[np.float32,
                                     np.float64]['64.bin' in covfile])
        assert (nX**2 == cov.shape[0])
        used_indices = np.concatenate(used_indices)
        self.data_vector = np.concatenate(data_vector)[used_indices]
        self.cov = cov.reshape(nX, nX)[np.ix_(used_indices,
                                              used_indices)].astype(np.float64)
        if not silent:
            for name, mn, mx in zip(self.cl_names, lmin, lmax):
                if name in use_cl:
                    print(name, mn, mx)
            print('Number of data points: %s' % self.cov.shape[0])
        self.lmax = lmax
        self.lmin = lmin
        max_l = np.max(self.lmax)
        self.ls = np.arange(max_l + 1)
        self.llp1 = self.ls * (self.ls + 1)

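        # Foreground templates are only needed when any of the first four spectra
        # (the temperature spectra) are used; each is normalized at the pivot l = 3000,
        # except the dust templates, which are read without a pivot.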
        if np.any(self.cl_used[:4]):
            pivot = 3000
            self.sz_143 = self.read_normalized(
                ini.relativeFileName('sz143file'), pivot)[:max_l + 1]
            self.ksz = self.read_normalized(ini.relativeFileName('kszfile'),
                                            pivot)[:max_l + 1]
            self.tszxcib = self.read_normalized(
                ini.relativeFileName('tszxcibfile'), pivot)[:max_l + 1]

            self.cib_217 = self.read_normalized(
                ini.relativeFileName('cib217file'), pivot)[:max_l + 1]

            self.dust = np.vstack(
                (self.read_normalized(
                    ini.relativeFileName('dust100file'))[:max_l + 1],
                 self.read_normalized(
                     ini.relativeFileName('dust143file'))[:max_l + 1],
                 self.read_normalized(
                     ini.relativeFileName('dust217file'))[:max_l + 1],
                 self.read_normalized(
                     ini.relativeFileName('dust143x217file'))[:max_l + 1]))
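            # ln(l / 3000) for l at or above the smallest used lmin, zero below.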
            self.lnrat = self.ls * 0.
            l_min = np.min(lmin[self.cl_used])
            self.lnrat[l_min:] = np.log(self.ls[l_min:] / np.float64(pivot))

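        # Cache the inverse covariance on disk, keyed on an md5 hash of the ini
        # parameters, so repeated loads skip the expensive matrix inversion.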
        import hashlib
        import os
        cache_file = self.dataset_file.replace(
            '.dataset', '_covinv_%s.npy' %
            hashlib.md5(str(ini.params).encode('utf8')).hexdigest())
        if use_cache and os.path.exists(cache_file):
            self.covinv = np.load(cache_file).astype(np.float64)
        else:
            self.covinv = np.linalg.inv(self.cov)
            if use_cache: np.save(cache_file, self.covinv.astype(np.float32))
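
A minimal usage sketch (not part of the source file above): it assumes this __init__ belongs to a likelihood class, here hypothetically named CamSpecLikelihood, and that a CamSpec-style .dataset file containing the keys referenced above (cl_hat_file, covmat_fiducial, data_ranges, ...) exists at the given path. The file path and spectrum names are illustrative.

# Hypothetical usage; class name, path and spectrum names are assumptions for illustration.
like = CamSpecLikelihood('data/CamSpec/CamSpec.dataset',
                         dataset_params={'use_cl': '143x143 217x217 143x217'},
                         silent=False)
print(like.cl_names)           # spectra listed in the data_ranges file
print(like.data_vector.shape)  # concatenated data for the used ell ranges
print(like.covinv.shape)       # matching inverse-covariance block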