Code Example #1
File: __init__.py Project: B-Rich/montepython_public
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        if 'bao_boss_aniso_gauss_approx' in data.experiments:
            raise io_mp.LikelihoodError(
                'conflicting bao_boss_aniso_gauss_approx measurements')

        # self.z, .hdif, .dafid, and .rsfid are read from the data file

        # load the aniso likelihood
        filepath = os.path.join(self.data_directory, self.file)
        # alpha_perp = D_A / rs (rs / DA)_fid
        # alpha_para = (H rs)_fid / (H rs)
        prob_dtype = [('alpha_perp', np.float64),
            ('alpha_para', np.float64),
            ('prob', np.float64)]
        prob = np.loadtxt(
            filepath, delimiter=None,
            comments='#', skiprows=0, dtype=prob_dtype)
        size = int(np.sqrt(len(prob)))
        x = prob['alpha_perp'].reshape(size, size)[:, 0]
        y = prob['alpha_para'].reshape(size, size)[0, :]
        Z = prob['prob'].reshape(size, size)
        normZ = np.max(Z)
        Z = Z/normZ
        # use the faster interp.RectBivariateSpline interpolation scheme
        self.prob_interp = interp.RectBivariateSpline(x, y, Z, kx=3, ky=3, s=0)
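
The spline built here is presumably evaluated later in the likelihood computation; below is a minimal sketch of that evaluation. The classy calls (rs_drag, angular_distance, Hubble) are real, but the fiducial attributes (self.dafid, self.hdif, self.rsfid) and the function itself are assumptions inferred from the comments above, not this likelihood's confirmed code.

import numpy as np

# Hypothetical sketch, not the actual loglkl of this likelihood:
def loglkl_sketch(self, cosmo, data):
    rs = cosmo.rs_drag()                      # sound horizon at drag epoch
    da = cosmo.angular_distance(self.z)       # D_A(z)
    H = cosmo.Hubble(self.z)                  # H(z) in CLASS units
    # alpha_perp = D_A / rs * (rs / D_A)_fid; alpha_para = (H rs)_fid / (H rs)
    alpha_perp = (da / rs) * (self.rsfid / self.dafid)
    alpha_para = (self.hdif * self.rsfid) / (H * rs)
    prob = self.prob_interp(alpha_perp, alpha_para)[0, 0]
    return np.log(max(prob, 1e-300))          # guard against log(0)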
Code Example #2
File: __init__.py Project: wilmarcardonac/dea
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.file), 'r') as filein:
            for line in filein:
                if line.strip() and line.find('#') == -1:
                    # the first entry of the line is the identifier
                    this_line = line.split()
                    # insert into array if this id is not manually excluded
                    if not this_line[0] in self.exclude:
                        self.z = np.append(self.z, float(this_line[1]))
                        self.data = np.append(self.data, float(this_line[2]))
                        self.error = np.append(self.error, float(this_line[3]))
                        self.type = np.append(self.type, int(this_line[4]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
Code Example #3
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # needed arguments in order to get sigma_8(z) up to z=2 with correct precision
        self.need_cosmo_arguments(data, {'output': 'mPk'})
        self.need_cosmo_arguments(data, {'P_k_max_h/Mpc': '1.'})
        self.need_cosmo_arguments(data, {'z_max_pk': '2.'})

        # are there conflicting experiments?
        if 'bao_fs_boss_dr12' in data.experiments:
            raise io_mp.LikelihoodError('conflicting bao measurements')

        # define arrays for values of z and data points
        self.z = np.array([], 'float64')
        self.fsig8 = np.array([], 'float64')
        self.sfsig8 = np.array([], 'float64')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.data_file),
                  'r') as filein:
            for i, line in enumerate(filein):
                if line.strip() and line.find('#') == -1:
                    this_line = line.split()
                    self.z = np.append(self.z, float(this_line[0]))
                    self.fsig8 = np.append(self.fsig8, float(this_line[1]))
                    self.sfsig8 = np.append(self.sfsig8, float(this_line[2]))

        # positions of the data
        self.Wigglez = [13, 14, 15]
        self.SDSS = [19, 20, 21, 22]
        # AP effect corrections
        self.HdAz = [
            5905.2, 5905.2, 5902.17, 27919.8, 40636.6, 45463.8, 47665.2,
            90926.2, 63409.3, 88415.1, 78751., 132588., 102712., 132420.,
            155232., 134060., 179999., 263053., 200977., 242503., 289340.,
            352504.
        ]
        # read covariance matrices
        self.CijWig = np.loadtxt(os.path.join(self.data_directory,
                                              self.cov_Wig_file),
                                 unpack=True)
        self.CijSDSS = np.loadtxt(os.path.join(self.data_directory,
                                               self.cov_SDSS_file),
                                  unpack=True)
        self.Cijfs8 = np.diagflat(np.power(self.sfsig8, 2))
        self.Cijfs8[(self.Wigglez[0] - 1):self.Wigglez[-1],
                    (self.Wigglez[0] - 1):self.Wigglez[-1]] = self.CijWig
        self.Cijfs8[(self.SDSS[0] - 1):self.SDSS[-1],
                    (self.SDSS[0] - 1):self.SDSS[-1]] = self.CijSDSS

        # number of bins
        self.num_points = np.shape(self.z)[0]

        # Scale-dependent growth parameters: wavenumber k0 (in Mpc units),
        # redshift step dz, and step size hstep for the numerical derivative.
        # The derivative settings are a bit touchy; adjust at your own peril.
        self.k0 = 0.1
        self.dz = 0.01
        self.hstep = 0.001
        # Grid of redshifts z in [0, 2) with step dz = 0.01, used for the
        # interpolation
        self.zed = np.arange(0.0, 2.0, self.dz)
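
A minimal sketch of how the hstep attribute could drive the numerical growth-rate derivative hinted at above; classy's sigma(R, z) and h() methods are real, but the function as a whole is an illustration under that assumption, not this likelihood's actual code.

import numpy as np

def fsigma8_sketch(self, cosmo, z):
    # f(z) = dln sigma8 / dln a = -(1+z) dln sigma8 / dz, via a central
    # difference with step self.hstep (hence the "touchy" warning above;
    # for z = 0 one would need a forward difference, omitted here)
    h = self.hstep
    R8 = 8.0 / cosmo.h()                    # 8 Mpc/h expressed in Mpc
    lnp = np.log(cosmo.sigma(R8, z + h))
    lnm = np.log(cosmo.sigma(R8, z - h))
    f = -(1.0 + z) * (lnp - lnm) / (2.0 * h)
    return f * cosmo.sigma(R8, z)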
Code Example #4
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.corr_types = []
        self.z = np.array([], 'float64')
        self.types = []

        scan_locations = {}
        scan_locations['xcf'] = os.path.join(self.data_directory,
                                             self.xcf_scan)

        # read redshifts and data points
        for line in open(os.path.join(self.data_directory, self.file), 'r'):
            if (line.strip().find('#') == -1 and len(line.strip()) > 0
                    and line.split()[0] == 'xcf'):
                self.corr_types += [line.split()[0]]
                self.z = np.append(self.z, float(line.split()[1]))
                self.types += [
                    set([int(line.split()[2]),
                         int(line.split()[3])])
                ]

        # number of data points
        self.num_points = np.shape(self.z)[0]

        #Make our interpolators
        self.chi2_interpolators = util.chi2_interpolators(
            scan_locations, self.transverse_fid, self.parallel_fid)
Code Example #5
    def __init__(self, path, data, command_line):
        """
        The structure differs significantly from other likelihoods, in order to
        follow as simply as possible the data structure.

        Each redshift bin in WiggleZ contains a .dataset file with information
        on this redshift bin. The structure to read this has been encoded in
        the class Likelihood_mpk. It will be used also with the next data
        release of SDSS.

        The whole WiggleZ is then made out of the four Likelihood_mpk:
        WiggleZ_a, b, c and d, which are **defined dynamically** thanks to the
        :func:`type` function in python, inheriting from
        :class:`Likelihood_mpk`.

        Some additional keyword arguments are sent to the initialization of
        these classes, in order to use the function
        :meth:`add_common_knowledge`. It then gives a dictionary of shared
        attributes that should be distributed to all four redshift bins.

        """

        Likelihood.__init__(self, path, data, command_line)

        # This obscure command essentially creates dynamically 4 likelihoods,
        # respectively called WiggleZ_a, b, c and d, inheriting from
        # Likelihood_mpk.
        for elem in ['a', 'b', 'c', 'd']:
            exec("WiggleZ_%s = type('WiggleZ_%s', (Likelihood_mpk, ), {})" % \
                (elem, elem))

        # Initialize the four independent redshift bins one after the other
        # (note: the order in the array self.redshift_bins_files must be
        # respected!)
        self.wigglez_a = WiggleZ_a(os.path.join(self.data_directory,
                                                self.redshift_bins_files[0]),
                                   data,
                                   command_line,
                                   common=True,
                                   common_dict=self.dictionary)

        self.wigglez_b = WiggleZ_b(os.path.join(self.data_directory,
                                                self.redshift_bins_files[1]),
                                   data,
                                   command_line,
                                   common=True,
                                   common_dict=self.dictionary)

        self.wigglez_c = WiggleZ_c(os.path.join(self.data_directory,
                                                self.redshift_bins_files[2]),
                                   data,
                                   command_line,
                                   common=True,
                                   common_dict=self.dictionary)

        self.wigglez_d = WiggleZ_d(os.path.join(self.data_directory,
                                                self.redshift_bins_files[3]),
                                   data,
                                   command_line,
                                   common=True,
                                   common_dict=self.dictionary)
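
For readers puzzled by the exec line above: type(name, bases, dict) builds a class at runtime, so the loop is equivalent to writing four ordinary class statements. A minimal illustration:

# Each iteration of the exec loop produces the equivalent of:
class WiggleZ_a(Likelihood_mpk):
    pass

# since type('WiggleZ_a', (Likelihood_mpk,), {}) returns the same kind of
# class object; exec is only needed to generate the names a, b, c, d in a loop.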
Code Example #6
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        if 'bao_boss_aniso_gauss_approx' in data.experiments:
            raise io_mp.LikelihoodError(
                'conflicting bao_boss_aniso_gauss_approx measurements')

        # self.z, .hdif, .dafid, and .rsfid are read from the data file

        # load the aniso likelihood
        filepath = os.path.join(self.data_directory, self.file)
        # alpha_perp = D_A / rs (rs / DA)_fid
        # alpha_para = (H rs)_fid / (H rs)
        prob_dtype = [('alpha_perp', np.float64), ('alpha_para', np.float64),
                      ('prob', np.float64)]
        prob = np.loadtxt(filepath,
                          delimiter=None,
                          comments='#',
                          skiprows=0,
                          dtype=prob_dtype)
        size = int(np.sqrt(len(prob)))
        x = prob['alpha_perp'].reshape(size, size)[:, 0]
        y = prob['alpha_para'].reshape(size, size)[0, :]
        Z = prob['prob'].reshape(size, size)
        normZ = np.max(Z)
        Z = Z / normZ
        # use the faster interp.RectBivariateSpline interpolation scheme
        self.prob_interp = interp.RectBivariateSpline(x, y, Z, kx=3, ky=3, s=0)
Code Example #7
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        #ZP: uncomment if run on its own
        #self.need_cosmo1_arguments(data, {'output': 'mPk'})
        #self.need_cosmo1_arguments(data, {'P_k_max_h/Mpc': '1.'})
        #self.need_cosmo1_arguments(data, {'z_max_pk': '1.'})
        #self.need_cosmo2_arguments(data, {'output': 'mPk'})
        #self.need_cosmo2_arguments(data, {'P_k_max_h/Mpc': '1.'})
        #self.need_cosmo2_arguments(data, {'z_max_pk': '1.'})

        # define array for values of z and data points
        self.data = np.array([], 'float64')
        self.data_type = np.array([], 'int')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.data_file),
                  'r') as filein:
            for line in filein:
                if line.strip() and line.find('#') == -1:
                    # the first entry of the line is the identifier
                    this_line = line.split()
                    # insert values into the arrays (no exclusion list here)
                    self.data = np.append(self.data, float(this_line[1]))
                    self.data_type = np.append(self.data_type,
                                               int(this_line[2]))
        # self.data = [3.045,  0.9649, 1.0409]
        # self.data_type = [1, 2, 3]

        # read covariance matrix
        self.covmat = np.loadtxt(
            os.path.join(self.data_directory, self.cov_file))
        self.cholesky_transform = cholesky(self.covmat, lower=True)
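
The Cholesky factor stored here is presumably used to compute the chi-square by forward substitution instead of an explicit matrix inverse; a minimal sketch, with the residual vector diff as an assumed input:

import numpy as np
from scipy.linalg import solve_triangular

def chi2_from_cholesky_sketch(cholesky_transform, diff):
    # With C = L L^T, solving L y = diff gives diff^T C^-1 diff = y . y
    y = solve_triangular(cholesky_transform, diff, lower=True)
    return np.dot(y, y)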
Code Example #8
File: __init__.py Project: alulujasmine/gambit_1.5
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        conflicting_experiments = [
            'bao', 'bao_boss', 'bao_known_rs',
            'bao_boss_aniso', 'bao_boss_aniso_gauss_approx', 'bao_smallz_2014'
        ]
        for experiment in conflicting_experiments:
            if experiment in data.experiments:
                raise io_mp.LikelihoodError('conflicting BAO measurements')

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.file), 'r') as filein:
            for line in filein:
                if line.strip() and line.find('#') == -1:
                    # the first entry of the line is the identifier
                    this_line = line.split()
                    # insert into array if this id is not manually excluded
                    if not this_line[0] in self.exclude:
                        self.z = np.append(self.z, float(this_line[1]))
                        self.data = np.append(self.data, float(this_line[2]))
                        self.error = np.append(self.error, float(this_line[3]))
                        self.type = np.append(self.type, int(this_line[4]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
Code Example #9
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # needed arguments in order to get sigma_8(z) up to z=1 with correct precision
        if 'K1K_CorrelationFunctions_2cosmos_geo_vs_growth' in data.experiments:
            print('Conflict! Relying on the KiDS likelihood to request P(k).')
        else:
            self.need_cosmo1_arguments(data, {'output': 'mPk'})
            self.need_cosmo1_arguments(data, {'P_k_max_h/Mpc': self.k_max})
            self.need_cosmo2_arguments(data, {'output': 'mPk'})
            self.need_cosmo2_arguments(data, {'P_k_max_h/Mpc': self.k_max})
            self.need_cosmo1_arguments(data, {'z_max_pk': '1.'})
            self.need_cosmo2_arguments(data, {'z_max_pk': '1.'})

        # are there conflicting experiments?
        if 'bao_boss_aniso' in data.experiments:
            raise io_mp.LikelihoodError(
                'conflicting bao_boss_aniso measurements')

        # define arrays for values of z and data points
        self.z = np.array([], 'float64')
        self.DM_rdfid_by_rd_in_Mpc = np.array([], 'float64')
        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.array([], 'float64')
        self.fsig8 = np.array([], 'float64')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.data_file),
                  'r') as filein:
            for i, line in enumerate(filein):
                if line.strip() and line.find('#') == -1:
                    this_line = line.split()
                    # load redshifts and D_M * (r_s / r_s_fid)^-1 in Mpc
                    if this_line[1] == 'dM(rsfid/rs)':
                        self.z = np.append(self.z, float(this_line[0]))
                        self.DM_rdfid_by_rd_in_Mpc = np.append(
                            self.DM_rdfid_by_rd_in_Mpc, float(this_line[2]))
                    # load H(z) * (r_s / r_s_fid) in km s^-1 Mpc^-1
                    elif this_line[1] == 'Hz(rs/rsfid)':
                        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.append(
                            self.H_rd_by_rdfid_in_km_per_s_per_Mpc,
                            float(this_line[2]))
                    # load f * sigma8
                    elif this_line[1] == 'fsig8':
                        self.fsig8 = np.append(self.fsig8, float(this_line[2]))

        # read covariance matrix
        self.covmat = np.loadtxt(
            os.path.join(self.data_directory, self.cov_file))
        self.cholesky_transform = cholesky(self.covmat, lower=True)

        # number of bins
        self.num_bins = np.shape(self.z)[0]

        # number of data points
        self.num_points = np.shape(self.covmat)[0]
Code Example #10
File: __init__.py Project: jlvdb/montepython_public
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # exclude the isotropic CMASS experiment when the anisotropic
        # measurement is also used
        exclude_isotropic_CMASS = False

        conflicting_experiments = [
            'bao_boss_aniso', 'bao_boss_aniso_gauss_approx'
        ]
        for experiment in conflicting_experiments:
            if experiment in data.experiments:
                exclude_isotropic_CMASS = True

        if exclude_isotropic_CMASS:
            warnings.warn("excluding isotropic CMASS measurement")
            if not hasattr(self, 'exclude') or self.exclude is None:
                self.exclude = ['CMASS']
            else:
                self.exclude.append('CMASS')

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        self.fid_values_exist = False
        if os.path.exists(os.path.join(self.data_directory,
                                       self.fiducial_file)):
            self.fid_values_exist = True
            with open(os.path.join(self.data_directory, self.fiducial_file),
                      'r') as filein:
                for line in filein:
                    if line.strip() and line.find('#') == -1:
                        # the first entry of the line is the identifier
                        this_line = line.split()
                        # insert into array if this id is not manually excluded
                        if not this_line[0] in self.exclude:
                            self.z = np.append(self.z, float(this_line[1]))
                            self.data = np.append(self.data,
                                                  float(this_line[2]))
                            self.error = np.append(self.error,
                                                   float(this_line[3]))
                            self.type = np.append(self.type, int(this_line[4]))

            # number of data points
            self.num_points = np.shape(self.z)[0]
Code Example #11
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        measurements = np.loadtxt(os.path.join(self.data_directory, self.file))
        self.z_eff, self.Dv = measurements[:, 0], measurements[:, 1]

        # Read the inverse covariance matrix, stored in data
        self.inverse_covmat = np.loadtxt(os.path.join(
            self.data_directory, self.inverse_covmat_file))
        # Multiply by 1e-4, as explained in Table 4
        self.inverse_covmat *= 1e-4

        # The matrix should be symmetric
        assert (self.inverse_covmat.T == self.inverse_covmat).all()
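
Given the inverse covariance loaded here, the likelihood is presumably the usual Gaussian form; a one-line sketch with an assumed residual vector:

import numpy as np

def chi2_sketch(inverse_covmat, diff):
    # Standard Gaussian chi2 with a precomputed inverse covariance
    return diff @ inverse_covmat @ diff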
Code Example #12
File: __init__.py Project: wilmarcardonac/dea
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        measurements = np.loadtxt(os.path.join(self.data_directory, self.file))
        self.z_eff, self.Dv = measurements[:, 0], measurements[:, 1]

        # Read the inverse covariance matrix, stored in data
        self.inverse_covmat = np.loadtxt(
            os.path.join(self.data_directory, self.inverse_covmat_file))
        # Multiply by 1e-4, as explained in Table 4
        self.inverse_covmat *= 1e-4

        # The matrix should be symmetric
        assert (self.inverse_covmat.T == self.inverse_covmat).all()
Code Example #13
    def __init__(self, path, data, command_line):
        """
        The structure differs significantly from other likelihoods, in order to
        follow as simply as possible the data structure.

        Each redshift bin in WiggleZ contains a .dataset file with information
        on this redshift bin. The structure to read this has been encoded in
        the class Likelihood_mpk. It will be used also with the next data
        release of SDSS.

        The whole WiggleZ is then made out of the four Likelihood_mpk:
        WiggleZ_a, b, c and d, which are **defined dynamically** thanks to the
        :func:`type` function in python, inheriting from
        :class:`Likelihood_mpk`.

        Some additional keyword arguments are sent to the initialization of
        these classes, in order to use the function
        :meth:`add_common_knowledge`. It then gives a dictionary of shared
        attributes that should be distributed to all four redshift bins.

        """

        Likelihood.__init__(self, path, data, command_line)

        # This obscure command essentially creates dynamically 4 likelihoods,
        # respectively called WiggleZ_a, b, c and d, inheriting from
        # Likelihood_mpk.
        for elem in ['a', 'b', 'c', 'd']:
            exec("WiggleZ_%s = type('WiggleZ_%s', (Likelihood_mpk, ), {})" % \
                (elem, elem))

        # Initialize the four independent redshift bins one after the other
        # (note: the order in the array self.redshift_bins_files must be
        # respected!)
        self.wigglez_a = WiggleZ_a(
            os.path.join(self.data_directory, self.redshift_bins_files[0]),
            data, command_line, common=True, common_dict=self.dictionary)

        self.wigglez_b = WiggleZ_b(
            os.path.join(self.data_directory, self.redshift_bins_files[1]),
            data, command_line, common=True, common_dict=self.dictionary)

        self.wigglez_c = WiggleZ_c(
            os.path.join(self.data_directory, self.redshift_bins_files[2]),
            data, command_line, common=True, common_dict=self.dictionary)

        self.wigglez_d = WiggleZ_d(
            os.path.join(self.data_directory, self.redshift_bins_files[3]),
            data, command_line, common=True, common_dict=self.dictionary)
Code Example #14
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        if 'bao_boss_aniso' in data.experiments:
            raise io_mp.LikelihoodError(
                'conflicting bao_boss_aniso measurements')

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.DA_rdfid_by_rd_in_Mpc = np.array([], 'float64')
        self.DA_error = np.array([], 'float64')
        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.array([], 'float64')
        self.H_error = np.array([], 'float64')
        self.cross_corr = np.array([], 'float64')
        self.rd_fid_in_Mpc = np.array([], 'float64')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.file), 'r') as filein:
            for line in filein:
                if line.find('#') == -1:
                    this_line = line.split()
                    # this_line[0] is some identifier
                    self.z = np.append(self.z, float(this_line[1]))
                    self.DA_rdfid_by_rd_in_Mpc = np.append(
                        self.DA_rdfid_by_rd_in_Mpc, float(this_line[2]))
                    self.DA_error = np.append(
                        self.DA_error, float(this_line[3]))
                    self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.append(
                        self.H_rd_by_rdfid_in_km_per_s_per_Mpc, float(this_line[4]))
                    self.H_error = np.append(
                        self.H_error, float(this_line[5]))
                    self.cross_corr = np.append(
                        self.cross_corr, float(this_line[6]))
                    self.rd_fid_in_Mpc = np.append(
                        self.rd_fid_in_Mpc, float(this_line[7]))

                    # is the cross correlation coefficient valid?
                    if not -1.0 <= self.cross_corr[-1] <= 1.0:
                        raise io_mp.LikelihoodError(
                            "invalid cross correlation coefficient in entry "
                            "%d: %f" % (len(self.cross_corr) - 1,
                                        self.cross_corr[-1]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
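
Each row read above carries D_A and H errors plus their cross-correlation, which points to a bivariate Gaussian per redshift bin; a minimal sketch of one bin's chi-square under that assumption (the residuals dDA and dH are hypothetical inputs):

def chi2_bin_sketch(dDA, dH, sigma_DA, sigma_H, rho):
    # Inverse of the 2x2 covariance [[s1^2, rho*s1*s2], [rho*s1*s2, s2^2]]
    # applied to the residual vector (dDA, dH)
    det = (1.0 - rho ** 2) * (sigma_DA * sigma_H) ** 2
    return ((dDA * sigma_H) ** 2 + (dH * sigma_DA) ** 2
            - 2.0 * rho * dDA * dH * sigma_DA * sigma_H) / det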
Code Example #15
File: __init__.py Project: B-Rich/montepython_public
    def __init__(self, path, data, command_line):

        # Define a default value, empty, for which field to use with the
        # likelihood. The proper value should be initialised in the .data file.
        Likelihood.__init__(self, path, data, command_line)

        # Create arrays that will store the values for all the possibly tested
        # fields.
        self.C_l = []
        self.C_l_hat = []
        self.N_l = []
        self.C_fl = []
        self.M_inv = []
        self.bpwf_l = []
        self.bpwf_Cs_l = []

        # Recover all the relevant quantities for the likelihood computation
        # from the BICEP collaboration, which includes the band power window
        # functions (bpwf). It assumes that in the "root" directory, there is a
        # "windows" directory which contains the band power window functions
        # from BICEP2.
        for field in self.fields:
            C_l, C_l_hat, N_l, C_fl, M_inv, bpwf_l, bpwf_Cs_l = bu.init(
                "bicep2",
                field,
                self.data_directory)
            self.C_l.append(C_l)
            self.C_l_hat.append(C_l_hat)
            self.N_l.append(N_l)
            self.C_fl.append(C_fl)
            self.M_inv.append(M_inv)
            self.bpwf_l.append(bpwf_l)
            self.bpwf_Cs_l.append(bpwf_Cs_l)

        # Read the desired max ell from the band power window function.
        self.l_max = max([elem[-1] for elem in self.bpwf_l])

        # Require tensor modes from Class
        arguments = {
            'output': 'tCl pCl lCl',
            'lensing': 'yes',
            'modes': 's, t',
            'l_max_scalars': self.l_max,
            'l_max_tensors': self.l_max,
        }
        self.need_cosmo_arguments(data, arguments)
Code Example #16
    def __init__(self, path, data, command_line):

        # Define a default value, empty, for which field to use with the
        # likelihood. The proper value should be initialised in the .data file.
        Likelihood.__init__(self, path, data, command_line)

        # Create arrays that will store the values for all the possibly tested
        # fields.
        self.C_l = []
        self.C_l_hat = []
        self.N_l = []
        self.C_fl = []
        self.M_inv = []
        self.bpwf_l = []
        self.bpwf_Cs_l = []

        # Recover all the relevant quantities for the likelihood computation
        # from the BICEP collaboration, which includes the band power window
        # functions (bpwf). It assumes that in the "root" directory, there is a
        # "windows" directory which contains the band power window functions
        # from BICEP2.
        for field in self.fields:
            C_l, C_l_hat, N_l, C_fl, M_inv, bpwf_l, bpwf_Cs_l = bu.init(
                "bicep2",
                field,
                self.data_directory)
            self.C_l.append(C_l)
            self.C_l_hat.append(C_l_hat)
            self.N_l.append(N_l)
            self.C_fl.append(C_fl)
            self.M_inv.append(M_inv)
            self.bpwf_l.append(bpwf_l)
            self.bpwf_Cs_l.append(bpwf_Cs_l)

        # Read the desired max ell from the band power window function.
        self.l_max = max([elem[-1] for elem in self.bpwf_l])

        # Require tensor modes from Class
        arguments = {
            'output': 'tCl pCl lCl',
            'lensing': 'yes',
            'modes': 's, t',
            'l_max_scalars': self.l_max,
            'l_max_tensors': self.l_max,
        }
        self.need_cosmo_arguments(data, arguments)
Code Example #17
File: __init__.py Project: wilmarcardonac/dea
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        conflicting_experiments = [
            'bao', 'bao_boss', 'bao_known_rs',
            'bao_boss_aniso', 'bao_boss_aniso_gauss_approx'
        ]
        for experiment in conflicting_experiments:
            if experiment in data.experiments:
                raise io_mp.LikelihoodError('conflicting BAO measurements')

        # define arrays for values of z and data points
        self.z = np.array([], 'float64')
        self.DM_rdfid_by_rd_in_Mpc = np.array([], 'float64')
        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.array([], 'float64')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.data_file),
                  'r') as filein:
            for i, line in enumerate(filein):
                if line.strip() and line.find('#') == -1:
                    this_line = line.split()
                    # load redshifts and D_M * (r_s / r_s_fid)^-1 in Mpc
                    if this_line[1] == 'dM(rsfid/rs)':
                        self.z = np.append(self.z, float(this_line[0]))
                        self.DM_rdfid_by_rd_in_Mpc = np.append(
                            self.DM_rdfid_by_rd_in_Mpc, float(this_line[2]))
                    # load H(z) * (r_s / r_s_fid) in km s^-1 Mpc^-1
                    elif this_line[1] == 'Hz(rs/rsfid)':
                        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.append(
                            self.H_rd_by_rdfid_in_km_per_s_per_Mpc,
                            float(this_line[2]))

        # read covariance matrix
        self.cov_data = np.loadtxt(
            os.path.join(self.data_directory, self.cov_file))

        # number of bins
        self.num_bins = np.shape(self.z)[0]

        # number of data points
        self.num_points = np.shape(self.cov_data)[0]
Code Example #18
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)
        # needed arguments in order to get derived parameters
        # (should be made conditional on the run configuration)
        # self.need_cosmo_arguments(data, {'output': 'mPk'})
        # self.need_cosmo_arguments(data, {'P_k_max_h/Mpc': 20.})
        # self.need_cosmo_arguments(data, {'z_max_pk': 3.})

        # define array for values of z and data points
        self.z = np.array([self.zeff], 'float64')
        scan_locations = os.path.join(self.data_directory, self.cf_scan)

        # number of data points
        self.num_points = np.shape(self.z)[0]

        #Make our interpolators
        self.chi2_interpolators = util.chi2_interpolators(
            scan_locations, self.transverse_fid, self.parallel_fid)
Code Example #19
File: __init__.py Project: jngrb/montepython_public
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # exclude the isotropic CMASS experiment when the anisotropic
        # measurement is also used
        exclude_isotropic_CMASS = False

        conflicting_experiments = [
            'bao_boss_aniso', 'bao_boss_aniso_gauss_approx']
        for experiment in conflicting_experiments:
            if experiment in data.experiments:
                exclude_isotropic_CMASS = True

        if exclude_isotropic_CMASS:
            warnings.warn("excluding isotropic CMASS measurement")
            if not hasattr(self, 'exclude') or self.exclude is None:
                self.exclude = ['CMASS']
            else:
                self.exclude.append('CMASS')

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.file), 'r') as filein:
            for line in filein:
                if line.find('#') == -1:
                    # the first entry of the line is the identifier
                    this_line = line.split()
                    # insert into array if this id is not manually excluded
                    if not this_line[0] in self.exclude:
                        self.z = np.append(self.z, float(this_line[1]))
                        self.data = np.append(self.data, float(this_line[2]))
                        self.error = np.append(self.error, float(this_line[3]))
                        self.type = np.append(self.type, int(this_line[4]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
Code Example #20
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        for line in open(os.path.join(self.data_directory, self.file), 'r'):
            if (line.find('#') == -1):
                self.z = np.append(self.z, float(line.split()[0]))
                self.data = np.append(self.data, float(line.split()[1]))
                self.error = np.append(self.error, float(line.split()[2]))
                self.type = np.append(self.type, int(line.split()[3]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
Code Example #21
    def __init__(self, path, data, command_line):

        # Standard initialization, reads the .data
        Likelihood.__init__(self, path, data, command_line)

        # Extra needed cosmological parameters
        self.need_cosmo_arguments(data, {
            'output': 'tCl pCl lCl',
            'lensing': 'yes'
        })

        try:
            import pywlik
        except ImportError:
            raise io_mp.MissingLibraryError(
                "You must first activate the binaries from the wmap wrapper." +
                " Please run : \n " +
                "]$ source /path/to/wrapper_wmap/bin/clik_profile.sh\n " +
                "and try again.")

        # instantiate the WMAP likelihood from the wrapper
        self.wmaplike = pywlik.wlik(self.large_data_directory, self.ttmin,
                                    self.ttmax, self.temin, self.temax,
                                    self.use_gibbs, self.use_lowlpol)

        # self.cls = np.loadtxt(self.cl_test_file)

        # loglike = self.wmaplike(self.cls)
        # print "got %g expected %g"%(loglike,-845.483)

        self.l_max = max(self.ttmax, self.temax)
        self.need_cosmo_arguments(data, {'l_max_scalars': self.l_max})

        # deal with nuisance parameters
        try:
            self.use_nuisance
        except AttributeError:
            self.use_nuisance = []
        self.read_contamination_spectra(data)

Code Example #22
File: __init__.py Project: wilmarcardonac/dea
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.moduli = np.array([], 'float64')

        # read redshifts and data points
        for line in open(os.path.join(
                self.data_directory, self.z_mu_dmu), 'r'):
            if (line.find('#') == -1):
                self.z = np.append(self.z, float(line.split()[1]))
                self.moduli = np.append(self.moduli, float(line.split()[2]))

        # number of data points
        self.num_points = np.shape(self.z)[0]

        # define covariance matrix
        covmat = np.zeros((self.num_points, self.num_points), 'float64')

        # file containing covariance matrix
        if self.has_syscovmat:
            covmat_filename = self.covmat_sys
        else:
            covmat_filename = self.covmat_nosys

        # read covariance matrix
        i = 0
        for line in open(os.path.join(
                self.data_directory, covmat_filename), 'r'):
            if (line.find('#') == -1):
                covmat[i] = line.split()
                i += 1

        # invert correlation matrix
        self.inv_covmat = np.linalg.inv(covmat)

        # find sum of all matrix elements; this scalar enters the analytic
        # marginalization over the unknown magnitude offset (sounds odd that
        # there is not a trace here instead, but this is correct!)
        self.inv_covmat_sum = np.sum(self.inv_covmat)
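
The sum of all inverse-covariance elements is the telltale of analytic marginalization over an additive offset (here, the unknown absolute magnitude in the distance moduli): the marginalized chi-square only needs that scalar. A minimal sketch, with the residual vector assumed:

import numpy as np

def marginalized_chi2_sketch(inv_covmat, residuals):
    # chi2 marginalized over a constant offset c in (residuals - c):
    # chi2 = r^T C^-1 r - (sum_i [C^-1 r]_i)^2 / (sum_ij [C^-1]_ij),
    # up to an additive constant; hence self.inv_covmat_sum above.
    A = residuals @ inv_covmat @ residuals
    B = np.sum(inv_covmat @ residuals)
    return A - B ** 2 / np.sum(inv_covmat)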
Code Example #23
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        for line in open(os.path.join(
                self.data_directory, self.file), 'r'):
            if (line.strip().find('#') == -1) and (len(line.strip()) > 0):
                self.z = np.append(self.z, float(line.split()[0]))
                self.data = np.append(self.data, float(line.split()[1]))
                self.error = np.append(self.error, float(line.split()[2]))
                self.type = np.append(self.type, int(line.split()[3]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
Code Example #24
File: __init__.py Project: B-Rich/montepython_public
    def __init__(self, path, data, command_line):

        # Standard initialization, reads the .data
        Likelihood.__init__(self, path, data, command_line)

        # Extra needed cosmological parameters
        self.need_cosmo_arguments(
            data, {'output': 'tCl pCl lCl', 'lensing': 'yes'})

        try:
            import pywlik
        except ImportError:
            raise io_mp.MissingLibraryError(
                "You must first activate the binaries from the wmap wrapper." +
                " Please run : \n " +
                "]$ source /path/to/wrapper_wmap/bin/clik_profile.sh\n " +
                "and try again.")

        # instantiate the WMAP likelihood from the wrapper
        self.wmaplike = pywlik.wlik(self.large_data_directory, self.ttmin,
                                    self.ttmax, self.temin, self.temax,
                                    self.use_gibbs, self.use_lowlpol)

        # self.cls = np.loadtxt(self.cl_test_file)

        # loglike = self.wmaplike(self.cls)
        # print "got %g expected %g"%(loglike,-845.483)

        self.l_max = max(self.ttmax, self.temax)
        self.need_cosmo_arguments(data, {'l_max_scalars': self.l_max})

        # deal with nuisance parameters
        try:
            self.use_nuisance
        except AttributeError:
            self.use_nuisance = []
        self.read_contamination_spectra(data)

Code Example #25
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        self.need_cosmo_arguments(
            data, {
                'lensing': 'yes', 
                'output': 'tCl lCl pCl', 
                'l_max_scalars': 6000,
                'modes': 's',
                'non linear': 'halofit'
                })

        self.need_update = True
        self.use_nuisance = ['yp']
        self.nuisance = ['yp']
        
        print "PATH", self.path
        self.act = act_like.ACTPol_s2(self.actdata)

        # \ell values 2, 3, ... 6000
        self.xx = np.array(range(2,6001))
Code Example #26
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], "float64")
        self.data = np.array([], "float64")
        self.error = np.array([], "float64")
        self.type = np.array([], "int")

        # read redshifts and data points
        for line in open(os.path.join(self.data_directory, self.file), "r"):
            if line.find("#") == -1:
                self.z = np.append(self.z, float(line.split()[0]))
                self.data = np.append(self.data, float(line.split()[1]))
                self.error = np.append(self.error, float(line.split()[2]))
                self.type = np.append(self.type, int(line.split()[3]))

        # number of data points
        self.num_points = np.shape(self.z)[0]

        for i in range(self.num_points):
            if self.type[i] == 3:
                # print 'before modification'
                # print 'data: %g, error: %g' % (self.data[i], self.error[i])
                # print 'relative error: %g' % (self.error[i]/self.data[i])
                # print 'known_rs: %g, rs_error: %g' % (
                # self.known_rs, self.rs_error)
                # print 'relative error: %g' % (self.rs_error/self.known_rs)
                self.data[i] = self.data[i] * self.known_rs * self.rs_rescale
                self.error[i] = self.data[i] * sqrt(
                    (self.error[i] * self.known_rs * self.rs_rescale / self.data[i]) ** 2
                    + (self.rs_error / self.known_rs) ** 2
                )
                # print 'after modification'
                # print 'data: %g, error: %g' % (self.data[i], self.error[i])
                # print 'relative error: %g' % (self.error[i]/self.data[i])
                # print
                self.type[i] = 4
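
The rescaling above is ordinary error propagation: since the new data value is data_old * known_rs * rs_rescale, the first term inside the square root reduces to the old relative error, so the two relative errors simply add in quadrature. A quick numeric check (illustrative numbers only, not data):

from math import sqrt

data_old, err_old = 10.0, 0.5                      # 5% measurement error
known_rs, rs_error, rs_rescale = 150.0, 1.5, 1.0   # 1% error on r_s
data_new = data_old * known_rs * rs_rescale
err_new = data_new * sqrt(
    (err_old * known_rs * rs_rescale / data_new) ** 2
    + (rs_error / known_rs) ** 2)
print(err_new / data_new)   # sqrt(0.05**2 + 0.01**2) ~ 0.0510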
Code Example #27
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        self.need_cosmo_arguments(
            data,
            {
                "lensing": "yes",
                "output": "tCl lCl pCl",
                "l_max_scalars": 6000,
                "modes": "s",
            },
        )

        self.need_update = True
        self.use_nuisance = ["yp2"]
        self.nuisance = ["yp2"]

        self.act = pyactlike.ACTPowerSpectrumData()

        # \ell values 2, 3, ... 6000
        self.xx = np.array(range(2, 6001))
Code Example #28
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # Read the four data points from the bandpower file
        self.bandpowers = np.loadtxt(os.path.join(
            self.data_directory, self.bandpower_file))

        # Read the band power window function (bpwf hereafter... yes, but
        # sometimes, explicit is too much)
        self.bpwf = self.load_bandpower_window_function(os.path.join(
            self.data_directory, self.bpwf_file))

        # l_max is now read from the bandpower window functions
        self.l_max = int(self.bpwf[0][-1, 0])

        # Require polarization from class
        arguments = {
            'output': 'tCl pCl lCl',
            'lensing': 'yes',
            'l_max_scalars': self.l_max}
        self.need_cosmo_arguments(data, arguments)
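
The window functions read here are presumably used to turn a theory spectrum into predicted bandpowers by a weighted sum over ell; a minimal sketch, where the two-column layout (ell, weight) of each window is an assumption consistent with self.bpwf[0][-1, 0] holding the last ell:

import numpy as np

def bandpower_sketch(bpwf_band, cl_theory):
    # bpwf_band[:, 0]: multipoles ell; bpwf_band[:, 1]: window weights
    # (assumed layout); cl_theory is a numpy array indexed directly by ell
    ells = bpwf_band[:, 0].astype(int)
    return np.sum(bpwf_band[:, 1] * cl_theory[ells])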
Code Example #29
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.corr_types = []
        self.z = np.array([], 'float64')
        self.types = []

        #Uncomment if run on its own
        # self.need_cosmo1_arguments(data, {'output': 'mPk'})
        # self.need_cosmo1_arguments(data, {'P_k_max_h/Mpc': '1.'})
        # self.need_cosmo1_arguments(data, {'z_max_pk': '1.'})
        # self.need_cosmo2_arguments(data, {'output': 'mPk'})
        # self.need_cosmo2_arguments(data, {'P_k_max_h/Mpc': '1.'})
        # self.need_cosmo2_arguments(data, {'z_max_pk': '1.'})

        scan_locations = {}
        scan_locations['comb'] = os.path.join(self.data_directory,
                                              self.cf_scan)

        # read redshifts and data points
        for line in open(os.path.join(self.data_directory, self.file), 'r'):
            if (line.strip().find('#') == -1 and len(line.strip()) > 0
                    and line.split()[0] == 'comb'):
                self.corr_types += [line.split()[0]]
                self.z = np.append(self.z, float(line.split()[1]))
                self.types += [
                    set([int(line.split()[2]),
                         int(line.split()[3])])
                ]

        # number of data points
        self.num_points = np.shape(self.z)[0]

        #Make our interpolators
        self.chi2_interpolators = chi2_interpolators(scan_locations,
                                                     self.transverse_fid,
                                                     self.parallel_fid)
Code Example #30
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # Read the four data points from the bandpower file
        self.bandpowers = np.loadtxt(os.path.join(
            self.data_directory, self.bandpower_file))

        # Read the band power window function (bpwf hereafter... yes, but
        # sometimes, explicit is too much)
        self.bpwf = self.load_bandpower_window_function(os.path.join(
            self.data_directory, self.bpwf_file))

        # l_max is now read from the bandpower window functions
        self.l_max = int(self.bpwf[0][-1, 0])

        # Require polarization from class
        arguments = {
            'output': 'tCl pCl lCl',
            'lensing': 'yes',
            'l_max_scalars': self.l_max}
        self.need_cosmo_arguments(data, arguments)
Code Example #31
File: __init__.py Project: jlvdb/montepython_public
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        for line in open(os.path.join(
                self.data_directory, self.file), 'r'):
            if (line.find('#') == -1):
                self.z = np.append(self.z, float(line.split()[0]))
                self.data = np.append(self.data, float(line.split()[1]))
                self.error = np.append(self.error, float(line.split()[2]))
                self.type = np.append(self.type, int(line.split()[3]))

        # number of data points
        self.num_points = np.shape(self.z)[0]

        for i in range(self.num_points):
            if self.type[i] == 3:
                #print('before modification')
                #print('data: %g, error: %g' % (self.data[i], self.error[i]))
                #print('relative error: %g' % (self.error[i]/self.data[i]))
                #print('known_rs: %g, rs_error: %g' % (
                    #self.known_rs, self.rs_error))
                #print('relative error: %g' % (self.rs_error/self.known_rs))
                self.data[i] = self.data[i] * self.known_rs * self.rs_rescale
                self.error[i] = self.data[i] * sqrt(
                    (self.error[i] * self.known_rs * self.rs_rescale
                     / self.data[i]) ** 2
                    + (self.rs_error / self.known_rs) ** 2)
                #print('after modification')
                #print('data: %g, error: %g' % (self.data[i], self.error[i]))
                #print('relative error: %g' % (self.error[i]/self.data[i]))
                #print()
                self.type[i] = 4
Code Example #32
    def __init__(self, path, data, command_line):
        Likelihood.__init__(self, path, data, command_line)
        self.zl = np.array([], 'float64')
        self.da = np.array([], 'float64')
        # read redshifts and data points
        for line in open(os.path.join(self.data_directory, self.data), 'r'):
            if (line.find('#') == -1):
                self.zl = np.append(self.zl, float(line.split()[0]))
                self.da = np.append(self.da, float(line.split()[1]))
                # self.da_error = np.append(self.da_error,
                #                           float(line.split()[3]))

        # number of data points
        self.num_points = np.shape(self.zl)[0]
        print(self.num_points)
        # define array of per-point lognormal parameters
        lognorm_params = np.zeros((self.num_points, 3), 'float64')

        # file containing the lognormal parameters
        if self.has_syscovmat:
            param_filename = self.covmat_sys
        else:
            param_filename = self.covmat_nosys

        # read the lognormal parameters
        i = 0
        for line in open(os.path.join(self.data_directory, param_filename),
                         'r'):
            if (line.find('#') == -1):
                lognorm_params[i] = line.split()
                i += 1
        print(lognorm_params)
        self.shape = np.zeros((self.num_points), 'float64')
        self.loc = np.zeros((self.num_points), 'float64')
        self.scale = np.zeros((self.num_points), 'float64')
        for m in range(self.num_points):
            self.shape[m] = lognorm_params[m][0]
            self.loc[m] = lognorm_params[m][1]
            self.scale[m] = lognorm_params[m][2]
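
The (shape, loc, scale) triplets match scipy's standard lognormal parameterization, so each data point presumably contributes a lognormal log-density; a minimal sketch (the model value da_model is a hypothetical input):

from scipy.stats import lognorm

def loglkl_point_sketch(self, m, da_model):
    # Log-density of the model D_A under point m's fitted lognormal
    return lognorm.logpdf(da_model, self.shape[m],
                          loc=self.loc[m], scale=self.scale[m])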
Code Example #33
File: __init__.py Project: wilmarcardonac/dea
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.zd = np.array([], 'float64')
        self.zs = np.array([], 'float64')
        self.lambda_d = np.array([], 'float64')
        self.mu_d = np.array([], 'float64')
        self.sigma_d = np.array([], 'float64')

        # read redshifts and data points
        for line in open(os.path.join(self.data_directory, self.file), 'r'):
            if (line.find("#") == -1):
                self.zd = np.append(self.zd, float(line.split()[0]))
                self.zs = np.append(self.zs, float(line.split()[1]))
                self.lambda_d = np.append(self.lambda_d,
                                          float(line.split()[2]))
                self.mu_d = np.append(self.mu_d, float(line.split()[3]))
                self.sigma_d = np.append(self.sigma_d, float(line.split()[4]))

        # number of data points
        self.num_points = np.shape(self.zd)[0]
Code Example #34
File: __init__.py Project: B-Rich/montepython_public
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # define array for values of z and data points
        self.zd = np.array([], 'float64')
        self.zs = np.array([], 'float64')
        self.lambda_d = np.array([], 'float64')
        self.mu_d = np.array([], 'float64')
        self.sigma_d = np.array([], 'float64')

        # read redshifts and data points
        for line in open(os.path.join(
                self.data_directory, self.file), 'r'):
            if (line.find("#") == -1):
                self.zd = np.append(self.zd, float(line.split()[0]))
                self.zs = np.append(self.zs, float(line.split()[1]))
                self.lambda_d = np.append(
                    self.lambda_d, float(line.split()[2]))
                self.mu_d = np.append(self.mu_d, float(line.split()[3]))
                self.sigma_d = np.append(self.sigma_d, float(line.split()[4]))

        # number of data points
        self.num_points = np.shape(self.zd)[0]
Code Example #35
File: __init__.py Project: alulujasmine/gambit_1.5
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)
Code Example #36
File: __init__.py Project: jlvdb/montepython_public
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        print("Initializing Lya likelihood")

        self.need_cosmo_arguments(data, {'output': 'mPk'})
        self.need_cosmo_arguments(data, {'P_k_max_h/Mpc': 1.5 * self.kmax})

        # number of grid points for the lcdm case (i.e. alpha=0, regardless
        # of beta and gamma values); not needed
        #lcdm_points = 33
        # number of non-astro params (i.e. alpha, beta, and gamma)
        self.params_numbers = 3

        alphas = np.zeros(self.grid_size, 'float64')
        betas = np.zeros(self.grid_size, 'float64')
        gammas = np.zeros(self.grid_size, 'float64')

        # Derived_lkl is a new type of derived parameter calculated in the
        # likelihood, and not known to CLASS. Initialising it here avoids
        # problems if the first point of the MCMC raises an error.
        data.derived_lkl = {'alpha': 0, 'beta': 0, 'gamma': 0, 'lya_neff': 0}

        self.bin_file_path = os.path.join(command_line.folder,
                                          self.bin_file_name)
        if not os.path.exists(self.bin_file_path):
            with open(self.bin_file_path, 'w') as bin_file:
                bin_file.write('#')
                for name in data.get_mcmc_parameters(['varying']):
                    name = re.sub('[$*&]', '', name)
                    bin_file.write(' %s\t' % name)
                for name in data.get_mcmc_parameters(['derived']):
                    name = re.sub('[$*&]', '', name)
                    bin_file.write(' %s\t' % name)
                for name in data.get_mcmc_parameters(['derived_lkl']):
                    name = re.sub('[$*&]', '', name)
                    bin_file.write(' %s\t' % name)
                bin_file.write('\n')
        if 'z_reio' not in data.get_mcmc_parameters([
                'derived'
        ]) or 'sigma8' not in data.get_mcmc_parameters(['derived']):
            raise io_mp.ConfigurationError(
                'Error: Lya likelihood needs z_reio and sigma8 as derived '
                'parameters')

        file_path = os.path.join(self.data_directory, self.grid_file)
        if os.path.exists(file_path):
            with open(file_path, 'r') as grid_file:
                line = grid_file.readline()
                while line.find('#') != -1:
                    line = grid_file.readline()
                while (line.find('\n') != -1 and len(line) == 3):
                    line = grid_file.readline()
                for index in range(self.grid_size):
                    alphas[index] = float(line.split()[0])
                    betas[index] = float(line.split()[1])
                    gammas[index] = float(line.split()[2])
                    line = grid_file.readline()
        else:
            raise io_mp.ConfigurationError('Error: grid file is missing')

        # Real parameters
        X_real = np.zeros((self.grid_size, self.params_numbers), 'float64')

        for k in range(self.grid_size):
            X_real[k][0] = self.khalf(alphas[k], betas[k],
                                      gammas[k])  # Here we use k_1/2
            X_real[k][1] = betas[k]
            X_real[k][2] = gammas[k]

        # For the normalization
        self.a_min = min(X_real[:, 0])
        self.b_min = min(X_real[:, 1])
        self.g_min = min(X_real[:, 2])
        self.a_max = max(X_real[:, 0])
        self.b_max = max(X_real[:, 1])
        self.g_max = max(X_real[:, 2])

        # Redshift independent parameters - params order: z_reio, sigma_8, n_eff, f_UV
        self.zind_param_size = [3, 5, 5,
                                3]  # How many values we have for each param
        self.zind_param_min = np.array([7., 0.5, -2.6, 0.])
        self.zind_param_max = np.array([15., 1.5, -2.0, 1.])
        zind_param_ref = np.array([9., 0.829, -2.3074, 0.])
        self.zreio_range = self.zind_param_max[0] - self.zind_param_min[0]
        self.neff_range = self.zind_param_max[2] - self.zind_param_min[2]

        # Redshift dependent parameters - params order: mean_f, t0, slope
        zdep_params_size = [9, 3, 3]  # How many values we have for each param
        zdep_params_refpos = [4, 1, 2]  # Where to store the P_F(ref) DATA

        # Mean flux values
        flux_ref_old = (np.array([
            0.669181, 0.617042, 0.564612, 0.512514, 0.461362, 0.411733,
            0.364155, 0.253828, 0.146033, 0.0712724
        ]))
        # Older, not used values
        #flux_min_meanf = (np.array([0.401509, 0.370225, 0.338767, 0.307509, 0.276817, 0.24704, 0.218493, 0.152297, 0.0876197, 0.0427634]))
        #flux_max_meanf = (np.array([0.936854, 0.863859, 0.790456, 0.71752, 0.645907, 0.576426, 0.509816, 0.355359, 0.204446, 0.0997813]))

        # Manage the data sets
        # FIRST (NOT USED) DATASET (19 wavenumbers) ***XQ-100***
        self.zeta_range_XQ = [
            3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2
        ]  # Redshift bins of the XQ-100 dataset (each with 19 wavenumbers k)
        self.k_XQ = [
            0.003, 0.006, 0.009, 0.012, 0.015, 0.018, 0.021, 0.024, 0.027,
            0.03, 0.033, 0.036, 0.039, 0.042, 0.045, 0.048, 0.051, 0.054, 0.057
        ]

        # SECOND DATASET (7 wavenumbers) ***HIRES/MIKE***
        self.zeta_range_mh = [
            4.2, 4.6, 5.0, 5.4
        ]  # Redshift bins of the HIRES/MIKE dataset (each with 7 wavenumbers k)
        self.k_mh = [
            0.00501187, 0.00794328, 0.0125893, 0.0199526, 0.0316228, 0.0501187,
            0.0794328
        ]  # Note that k is in s/km

        self.zeta_full_length = (len(self.zeta_range_XQ) +
                                 len(self.zeta_range_mh))
        self.kappa_full_length = (len(self.k_XQ) + len(self.k_mh))

        # Which snapshots we use (first 7 for first dataset, last 4 for second one)
        self.redshift = [3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.2, 4.6, 5.0, 5.4]

        # T0 and slope values
        t0_ref_old = np.array([
            11251.5, 11293.6, 11229.0, 10944.6, 10421.8, 9934.49, 9227.31,
            8270.68, 7890.68, 7959.4
        ])
        slope_ref_old = np.array([
            1.53919, 1.52894, 1.51756, 1.50382, 1.48922, 1.47706, 1.46909,
            1.48025, 1.50814, 1.52578
        ])

        t0_values_old = np.zeros((10, zdep_params_size[1]), 'float64')
        t0_values_old[:, 0] = np.array([
            7522.4, 7512.0, 7428.1, 7193.32, 6815.25, 6480.96, 6029.94,
            5501.17, 5343.59, 5423.34
        ])
        t0_values_old[:, 1] = t0_ref_old[:]
        t0_values_old[:, 2] = np.array([
            14990.1, 15089.6, 15063.4, 14759.3, 14136.3, 13526.2, 12581.2,
            11164.9, 10479.4, 10462.6
        ])

        slope_values_old = np.zeros((10, zdep_params_size[2]), 'float64')
        slope_values_old[:, 0] = np.array([
            0.996715, 0.979594, 0.960804, 0.938975, 0.915208, 0.89345,
            0.877893, 0.8884, 0.937664, 0.970259
        ])
        slope_values_old[:, 1] = [
            1.32706, 1.31447, 1.30014, 1.28335, 1.26545, 1.24965, 1.2392,
            1.25092, 1.28657, 1.30854
        ]
        slope_values_old[:, 2] = slope_ref_old[:]

        self.t0_min = t0_values_old[:, 0] * 0.1
        self.t0_max = t0_values_old[:, 2] * 1.4
        self.slope_min = slope_values_old[:, 0] * 0.8
        self.slope_max = slope_values_old[:, 2] * 1.15

        # Import the two grids for Kriging
        file_path = os.path.join(self.data_directory, self.astro_spectra_file)
        if os.path.exists(file_path):
            with open(file_path, 'rb') as pkl:
                try:
                    self.input_full_matrix_interpolated_ASTRO = pickle.load(pkl)
                except UnicodeDecodeError:
                    # Python 2 pickle: reread with latin-1 encoding
                    pkl.seek(0)
                    self.input_full_matrix_interpolated_ASTRO = pickle.load(
                        pkl, encoding='latin1')
        else:
            raise io_mp.ConfigurationError(
                'Error: astro spectra file is missing')

        file_path = os.path.join(self.data_directory, self.abg_spectra_file)
        if os.path.exists(file_path):
            with open(file_path, 'rb') as pkl:
                try:
                    self.input_full_matrix_interpolated_ABG = pickle.load(pkl)
                except UnicodeDecodeError:
                    # Python 2 pickle: reread with latin-1 encoding
                    pkl.seek(0)
                    self.input_full_matrix_interpolated_ABG = pickle.load(
                        pkl, encoding='latin1')
        else:
            raise io_mp.ConfigurationError(
                'Error: abg spectra file is missing')
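        # The same latin-1 fallback recurs for every pickle loaded below; a
        # hedged helper sketch that would factor it out (hypothetical, not
        # part of the original file):
        #   def _load_pickle(fpath):
        #       with open(fpath, 'rb') as f:
        #           try:
        #               return pickle.load(f)
        #           except UnicodeDecodeError:  # Python 2 pickle
        #               f.seek(0)
        #               return pickle.load(f, encoding='latin1')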

        ALL_zdep_params = len(flux_ref_old) + len(t0_ref_old) + len(
            slope_ref_old)
        grid_length_ABG = len(self.input_full_matrix_interpolated_ABG[0, 0, :])
        grid_length_ASTRO = len(
            self.input_full_matrix_interpolated_ASTRO[0, 0, :])
        astroparams_number_KRIG = len(self.zind_param_size) + ALL_zdep_params

        # Import the ABG GRID (alpha, beta, gamma)
        file_path = os.path.join(self.data_directory, self.abg_grid_file)
        if os.path.exists(file_path):
            self.X_ABG = np.zeros((grid_length_ABG, self.params_numbers),
                                  'float64')
            for param_index in range(self.params_numbers):
                self.X_ABG[:,
                           param_index] = np.genfromtxt(file_path,
                                                        usecols=[param_index],
                                                        skip_header=1)
        else:
            raise io_mp.ConfigurationError('Error: abg grid file is missing')

        # Import the ASTRO GRID (ordering of params: z_reio, sigma_8, n_eff, f_UV, mean_f(z), t0(z), slope(z))
        file_path = os.path.join(self.data_directory, self.abg_astro_grid_file)
        if os.path.exists(file_path):
            self.X = np.zeros((grid_length_ASTRO, astroparams_number_KRIG),
                              'float64')
            for param_index in range(astroparams_number_KRIG):
                self.X[:, param_index] = np.genfromtxt(file_path,
                                                       usecols=[param_index],
                                                       skip_header=1)
        else:
            raise io_mp.ConfigurationError(
                'Error: abg+astro grid file is missing')

        # Prepare the interpolation in astro-param space
        self.redshift_list = np.array([
            3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.6, 5.0, 5.4
        ])  # This corresponds to the combined dataset (MIKE/HIRES + XQ-100)
        self.F_prior_min = np.array([
            0.535345, 0.493634, 0.44921, 0.392273, 0.338578, 0.28871, 0.218493,
            0.146675, 0.0676442, 0.0247793
        ])
        self.F_prior_max = np.array([
            0.803017, 0.748495, 0.709659, 0.669613, 0.628673, 0.587177,
            0.545471, 0.439262, 0.315261, 0.204999
        ])

        # Load the data
        if self.DATASET != "mike-hires":
            raise io_mp.LikelihoodError(
                'Error: for the time being, only the mike-hires dataset is available'
            )

        file_path = os.path.join(self.data_directory, self.MIKE_spectra_file)
        if os.path.exists(file_path):
            with open(file_path, 'rb') as pkl:
                try:
                    y_M_reshaped = pickle.load(pkl)
                except UnicodeDecodeError:
                    pkl.seek(0)
                    y_M_reshaped = pickle.load(pkl, encoding='latin1')
        else:
            raise io_mp.ConfigurationError(
                'Error: MIKE spectra file is missing')

        file_path = os.path.join(self.data_directory, self.HIRES_spectra_file)
        if os.path.exists(file_path):
            with open(file_path, 'rb') as pkl:
                try:
                    y_H_reshaped = pickle.load(pkl)
                except UnicodeDecodeError:
                    pkl.seek(0)
                    y_H_reshaped = pickle.load(pkl, encoding='latin1')
        else:
            raise io_mp.ConfigurationError(
                'Error: HIRES spectra file is missing')

        file_path = os.path.join(self.data_directory, self.MIKE_cov_file)
        if os.path.exists(file_path):
            with open(file_path, 'rb') as pkl:
                try:
                    cov_M_inverted = pickle.load(pkl)
                except UnicodeDecodeError:
                    pkl.seek(0)
                    cov_M_inverted = pickle.load(pkl, encoding='latin1')
        else:
            raise io_mp.ConfigurationError(
                'Error: MIKE covariance matrix file is missing')

        file_path = os.path.join(self.data_directory, self.HIRES_cov_file)
        if os.path.exists(file_path):
            with open(file_path, 'rb') as pkl:
                try:
                    cov_H_inverted = pickle.load(pkl)
                except UnicodeDecodeError:
                    pkl.seek(0)
                    cov_H_inverted = pickle.load(pkl, encoding='latin1')
        else:
            raise io_mp.ConfigurationError(
                'Error: HIRES covariance matrix file is missing')

        file_path = os.path.join(self.data_directory, self.PF_noPRACE_file)
        if os.path.exists(file_path):
            with open(file_path, 'rb') as pkl:
                try:
                    self.PF_noPRACE = pickle.load(pkl)
                except UnicodeDecodeError:
                    pkl.seek(0)
                    self.PF_noPRACE = pickle.load(pkl, encoding='latin1')
        else:
            raise io_mp.ConfigurationError('Error: PF_noPRACE file is missing')

        self.cov_MH_inverted = block_diag(cov_H_inverted, cov_M_inverted)
        self.y_MH_reshaped = np.concatenate((y_H_reshaped, y_M_reshaped))
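        # Note: block_diag assumes HIRES and MIKE are uncorrelated; the
        # HIRES-first ordering of the blocks matches the concatenation of the
        # data vectors on the line above.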

        print("Initialization of Lya likelihood done")
Code Example #37
File: __init__.py Project: jlvdb/montepython_public
    def __init__(self, path, data, command_line):
        # I should already take care of using only GRF mocks or data here (because of different folder-structures etc...)
        # or for now just write it for GRFs for tests and worry about it later...
        Likelihood.__init__(self, path, data, command_line)

        # Check if the data can be found
        try:
            fname = os.path.join(self.data_directory,
                                 'Resetting_bias/parameters_B_mode_model.dat')
            parser_mp.existing_file(fname)
        except:
            raise io_mp.ConfigurationError(
                'KiDS-450 QE data not found. Download the data at '
                'http://kids.strw.leidenuniv.nl/sciencedata.php '
                'and specify path to data through the variable '
                'kids450_qe_likelihood_public.data_directory in '
                'the .data file. See README in likelihood folder '
                'for further instructions.')

        # TODO: this is also CFHTLenS legacy...
        # only relevant for GRFs!
        #dict_BWM = {'W1': 'G10_', 'W2': 'G126_', 'W3': 'G162_', 'W4': 'G84_'}

        self.need_cosmo_arguments(data, {'output': 'mPk'})

        self.redshift_bins = []
        for index_zbin in xrange(len(self.zbin_min)):
            redshift_bin = '{:.2f}z{:.2f}'.format(self.zbin_min[index_zbin],
                                                  self.zbin_max[index_zbin])
            self.redshift_bins.append(redshift_bin)

        # number of z-bins
        self.nzbins = len(self.redshift_bins)
        # number of *unique* correlations between z-bins
        self.nzcorrs = self.nzbins * (self.nzbins + 1) // 2  # integer division, so it can serve as a loop bound in Python 3

        all_bands_EE_to_use = []
        all_bands_BB_to_use = []
        '''
        if self.fit_cross_correlations_only:
            # mask out auto-spectra:
            for index_zbin1 in xrange(self.nzbins):
                for index_zbin2 in xrange(index_zbin1 + 1):
                    if index_zbin1 == index_zbin2:
                        all_bands_EE_to_use += np.zeros_like(self.bands_EE_to_use).tolist()
                        all_bands_BB_to_use += np.zeros_like(self.bands_BB_to_use).tolist()
                    else:
                        all_bands_EE_to_use += self.bands_EE_to_use
                        all_bands_BB_to_use += self.bands_BB_to_use

        else:
            # default, use all correlations:
            for i in xrange(self.nzcorrs):
                all_bands_EE_to_use += self.bands_EE_to_use
                all_bands_BB_to_use += self.bands_BB_to_use
        '''
        # default, use all correlations:
        for i in xrange(self.nzcorrs):
            all_bands_EE_to_use += self.bands_EE_to_use
            all_bands_BB_to_use += self.bands_BB_to_use

        all_bands_to_use = np.concatenate(
            (all_bands_EE_to_use, all_bands_BB_to_use))
        self.indices_for_bands_to_use = np.where(
            np.asarray(all_bands_to_use) == 1)[0]

        # this is also the number of points in the datavector
        ndata = len(self.indices_for_bands_to_use)

        # I should load all the data needed only once, i.e. HERE:
        # not so sure about statement above, I have the feeling "init" is called for every MCMC step...
        # maybe that's why the memory is filling up on other machines?! --> nope, that wasn't the reason...
        start_load = time.time()

        if self.correct_resetting_bias:
            fname = os.path.join(self.data_directory,
                                 'Resetting_bias/parameters_B_mode_model.dat')
            A_B_modes, exp_B_modes, err_A_B_modes, err_exp_B_modes = np.loadtxt(
                fname, unpack=True)
            self.params_resetting_bias = np.array([A_B_modes, exp_B_modes])
            fname = os.path.join(self.data_directory,
                                 'Resetting_bias/covariance_B_mode_model.dat')
            self.cov_resetting_bias = np.loadtxt(fname)

        # try to load fiducial m-corrections from file (currently these are global values over full field, hence no looping over fields required for that!)
        # TODO: Make output dependent on field, not necessary for current KiDS approach though!
        try:
            fname = os.path.join(
                self.data_directory,
                '{:}zbins/m_correction_avg.txt'.format(self.nzbins))
            if self.nzbins == 1:
                self.m_corr_fiducial_per_zbin = np.asarray(
                    [np.loadtxt(fname, usecols=[1])])
            else:
                self.m_corr_fiducial_per_zbin = np.loadtxt(fname, usecols=[1])
        except:
            self.m_corr_fiducial_per_zbin = np.zeros(self.nzbins)
            print('Could not load m-correction values from \n', fname)
            print('Setting them to zero instead.')

        try:
            fname = os.path.join(
                self.data_directory,
                '{:}zbins/sigma_int_n_eff_{:}zbins.dat'.format(
                    self.nzbins, self.nzbins))
            tbdata = np.loadtxt(fname)
            if self.nzbins == 1:
                # correct columns for file!
                sigma_e1 = np.asarray([tbdata[2]])
                sigma_e2 = np.asarray([tbdata[3]])
                n_eff = np.asarray([tbdata[4]])
            else:
                # correct columns for file!
                sigma_e1 = tbdata[:, 2]
                sigma_e2 = tbdata[:, 3]
                n_eff = tbdata[:, 4]

            self.sigma_e = np.sqrt((sigma_e1**2 + sigma_e2**2) / 2.)
            # convert from 1 / sq. arcmin to 1 / sterad
            self.n_eff = n_eff / np.deg2rad(1. / 60.)**2
        except:
            # these dummies will set noise power always to 0!
            self.sigma_e = np.zeros(self.nzbins)
            self.n_eff = np.ones(self.nzbins)
            print('Could not load sigma_e and n_eff!')

        collect_bp_EE_in_zbins = []
        collect_bp_BB_in_zbins = []
        # collect BP per zbin and combine into one array
        for zbin1 in xrange(self.nzbins):
            for zbin2 in xrange(zbin1 + 1):  #self.nzbins):
                # zbin2 first in fname!
                fname_EE = os.path.join(
                    self.data_directory,
                    '{:}zbins/band_powers_EE_z{:}xz{:}.dat'.format(
                        self.nzbins, zbin1 + 1, zbin2 + 1))
                fname_BB = os.path.join(
                    self.data_directory,
                    '{:}zbins/band_powers_BB_z{:}xz{:}.dat'.format(
                        self.nzbins, zbin1 + 1, zbin2 + 1))
                extracted_band_powers_EE = np.loadtxt(fname_EE)
                extracted_band_powers_BB = np.loadtxt(fname_BB)
                collect_bp_EE_in_zbins.append(extracted_band_powers_EE)
                collect_bp_BB_in_zbins.append(extracted_band_powers_BB)

        self.band_powers = np.concatenate(
            (np.asarray(collect_bp_EE_in_zbins).flatten(),
             np.asarray(collect_bp_BB_in_zbins).flatten()))
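        # The data vector is thus ordered as all EE band powers (z-bin pairs in
        # the loop order above) followed by all BB band powers; the covariance
        # file loaded next is assumed to follow the same ordering.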

        fname = os.path.join(
            self.data_directory,
            '{:}zbins/covariance_all_z_EE_BB.dat'.format(self.nzbins))
        self.covariance = np.loadtxt(fname)

        fname = os.path.join(
            self.data_directory,
            '{:}zbins/band_window_matrix_nell100.dat'.format(self.nzbins))
        self.band_window_matrix = np.loadtxt(fname)
        # ells_intp and also band_offset are consistent between different patches!

        fname = os.path.join(
            self.data_directory,
            '{:}zbins/multipole_nodes_for_band_window_functions_nell100.dat'.
            format(self.nzbins))
        self.ells_intp = np.loadtxt(fname)
        self.band_offset_EE = len(extracted_band_powers_EE)
        self.band_offset_BB = len(extracted_band_powers_BB)

        # Check if any of the n(z) needs to be shifted in loglkl by D_z{1...n}:
        self.shift_n_z_by_D_z = np.zeros(self.nzbins, 'bool')
        for zbin in xrange(self.nzbins):
            param_name = 'D_z{:}'.format(zbin + 1)
            if param_name in data.mcmc_parameters:
                self.shift_n_z_by_D_z[zbin] = True

        # Read fiducial dn_dz from window files:
        # TODO: the hardcoded z_min and z_max correspond to the lower and upper
        # endpoints of the shifted left-border histogram!
        z_samples = []
        hist_samples = []
        for zbin in xrange(self.nzbins):
            redshift_bin = self.redshift_bins[zbin]
            window_file_path = os.path.join(
                self.data_directory,
                '{:}/n_z_avg_{:}.hist'.format(self.photoz_method,
                                              redshift_bin))
            if os.path.exists(window_file_path):
                zptemp, hist_pz = np.loadtxt(window_file_path,
                                             usecols=[0, 1],
                                             unpack=True)
                shift_to_midpoint = np.diff(zptemp)[0] / 2.
                if zbin == 0:
                    # keep the first bin's z-sampling as the reference
                    zpcheck = zptemp
                elif np.sum((zptemp - zpcheck)**2) > 1e-6:
                    raise io_mp.LikelihoodError(
                        'The redshift values for the window files at different bins do not match.'
                    )
                print('Loaded n(zbin{:}) from: \n'.format(zbin + 1),
                      window_file_path)
                # we add a zero as first element because we want to integrate down to z = 0!
                z_samples += [
                    np.concatenate((np.zeros(1), zptemp + shift_to_midpoint))
                ]
                hist_samples += [np.concatenate((np.zeros(1), hist_pz))]
            else:
                raise io_mp.LikelihoodError("File not found:\n %s" %
                                            window_file_path)

        z_samples = np.asarray(z_samples)
        hist_samples = np.asarray(hist_samples)

        # prevent undersampling of histograms!
        if self.nzmax < len(zptemp):
            print(
                "You're trying to integrate at lower resolution than supplied by the n(z) histograms. \n Increase nzmax! Aborting now..."
            )
            exit()
        # if that's the case, we want to integrate at histogram resolution and need to account for
        # the extra zero entry added
        elif self.nzmax == len(zptemp):
            self.nzmax = z_samples.shape[1]
            # requires that z-spacing is always the same for all bins...
            self.redshifts = z_samples[0, :]
            print('Integrations performed at resolution of histogram!')
        # if we interpolate anyway at arbitrary resolution the extra 0 doesn't matter
        else:
            self.nzmax += 1
            self.redshifts = np.linspace(z_samples.min(), z_samples.max(),
                                         self.nzmax)
            print('Integration performed at set nzmax resolution!')

        self.pz = np.zeros((self.nzmax, self.nzbins))
        self.pz_norm = np.zeros(self.nzbins, 'float64')
        for zbin in xrange(self.nzbins):
            # we assume that the histograms loaded are given as left-border histograms
            # and that the z-spacing is the same for each histogram
            spline_pz = itp.splrep(z_samples[zbin, :], hist_samples[zbin, :])

            #z_mod = self.z_p
            mask_min = self.redshifts >= z_samples[zbin, :].min()
            mask_max = self.redshifts <= z_samples[zbin, :].max()
            mask = mask_min & mask_max
            # points outside the z-range of the histograms are set to 0!
            self.pz[mask, zbin] = itp.splev(self.redshifts[mask], spline_pz)
            # Normalize selection functions
            dz = self.redshifts[1:] - self.redshifts[:-1]
            self.pz_norm[zbin] = np.sum(
                0.5 * (self.pz[1:, zbin] + self.pz[:-1, zbin]) * dz)
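            # pz_norm[zbin] is the trapezoidal-rule integral of the sampled
            # histogram, presumably used later to normalize n(z).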

        self.z_max = self.redshifts.max()

        # k_max is arbitrary at the moment, since cosmology module is not calculated yet...TODO
        if self.mode == 'halofit':
            self.need_cosmo_arguments(
                data, {
                    'z_max_pk': self.z_max,
                    'output': 'mPk',
                    'non linear': self.mode,
                    'P_k_max_h/Mpc': self.k_max_h_by_Mpc
                })
        else:
            self.need_cosmo_arguments(
                data, {
                    'z_max_pk': self.z_max,
                    'output': 'mPk',
                    'P_k_max_h/Mpc': self.k_max_h_by_Mpc
                })

        print('Time for loading all data files:', time.time() - start_load)

        fname = os.path.join(self.data_directory, 'number_datapoints.txt')
        np.savetxt(fname, [ndata],
                   header='number of datapoints in masked datavector')

        return
Code Example #38
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        self.need_cosmo_arguments(data, {'output': 'mPk'})
        self.need_cosmo_arguments(data, {'z_max_pk': self.zmax})
        self.need_cosmo_arguments(data, {'P_k_max_1/Mpc': 1.5*self.kmax})

        #################
        # find number of galaxies for each mean redshift value
        #################

        # Deduce the dz step from the number of bins and the edge values of z
        self.dz = (self.zmax-self.zmin)/(self.nbin-1.)

        # Compute the number of galaxies for each \bar z
        # For this, one needs dn/dz TODO
        # then n_g(\bar z) = int_{\bar z - dz/2}^{\bar z + dz/2} dn/dz dz

        # self.z_mean will contain the central values
        self.z_mean = np.linspace(self.zmin, self.zmax, num=self.nbin)

        # Store the z edge values
        self.z_edges = np.linspace(
            self.zmin-self.dz/2., self.zmax+self.dz/2,
            num=self.nbin+1)

        # Store the total vector z, with edges + mean
        self.z = np.linspace(
            self.zmin-self.dz/2., self.zmax+self.dz/2.,
            num=2*self.nbin+1)

        # At the center of each bin, compute the bias function, simply taken
        # as sqrt(z_mean+1)
        self.b = np.sqrt(self.z_mean+1)
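        # e.g. for z_mean = 1.0 this gives b = sqrt(2) ~ 1.414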

        # Define the k values for the integration (from kmin to kmax), at which
        # the spectrum will be computed (and stored for the fiducial model)
        # k_size is deeply arbitrary here, TODO
        self.k_fid = np.logspace(
            log10(self.kmin), log10(self.kmax), num=self.k_size)

        ################
        # Noise spectrum TODO properly
        ################

        self.n_g = np.zeros(self.nbin, 'float64')

        # obsolete settings from 2012
        #self.n_g = np.array([6844.945, 7129.45,
        #                     7249.912, 7261.722,
        #                     7203.825, 7103.047,
        #                     6977.571, 6839.546,
        #                     6696.957, 5496.988,
        #                     4459.240, 3577.143,
        #                     2838.767, 2229.282,
        #                     1732.706, 1333.091])
        #self.n_g = self.n_g * self.efficiency * 41253.

        # euclid 2016 settings
        self.n_g = np.array([2434.280, 4364.812,
                             4728.559, 4825.798,
                             4728.797, 4507.625,
                             4269.851, 3720.657,
                             3104.309, 2308.975,
                             1514.831, 1474.707,
                             893.716, 497.613])

        self.n_g = self.n_g * self.fsky * 41253  # 41253 deg^2 = full-sky area

        # If the file exists, initialize the fiducial values, the spectrum will
        # be read first, with k_size values of k and nbin values of z. Then,
        # H_fid and D_A fid will be read (each with nbin values).
        self.fid_values_exist = False
        self.pk_nl_fid = np.zeros((self.k_size, 2*self.nbin+1), 'float64')
        self.H_fid = np.zeros(2*self.nbin+1, 'float64')
        self.D_A_fid = np.zeros(2*self.nbin+1, 'float64')
        self.sigma_r_fid = np.zeros(self.nbin, 'float64')

        fid_file_path = os.path.join(self.data_directory, self.fiducial_file)
        if os.path.exists(fid_file_path):
            self.fid_values_exist = True
            with open(fid_file_path, 'r') as fid_file:
                line = fid_file.readline()
                while line.find('#') != -1:
                    line = fid_file.readline()
                while (line.find('\n') != -1 and len(line) == 1):
                    line = fid_file.readline()
                for index_k in xrange(self.k_size):
                    for index_z in xrange(2*self.nbin+1):
                        self.pk_nl_fid[index_k, index_z] = float(line)
                        line = fid_file.readline()
                for index_z in xrange(2*self.nbin+1):
                    self.H_fid[index_z] = float(line.split()[0])
                    self.D_A_fid[index_z] = float(line.split()[1])
                    line = fid_file.readline()
                for index_z in xrange(self.nbin):
                    self.sigma_r_fid[index_z] = float(line)
                    line = fid_file.readline()

        # Else the file will be created in the loglkl() function.
        return
Code Example #39
File: __init__.py Project: B-Rich/montepython_public
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        self.need_cosmo_arguments(data, {'output': 'mPk'})
Code Example #40
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        self.need_cosmo_arguments(data, {'output': 'mPk'})
        self.need_cosmo_arguments(data, {'z_max_pk': self.zmax})
        self.need_cosmo_arguments(data, {'P_k_max_1/Mpc': 1.5*self.k_cut(self.zmax)})

        # Compute non-linear power spectrum if requested
        if self.use_halofit:
            self.need_cosmo_arguments(data, {'non linear': 'halofit'})
            print("Using halofit")

        # Deduce the dz step from the number of bins and the edge values of z
        self.dz = (self.zmax-self.zmin)/self.nbin

        # Compute new zmin and zmax which are bin centers
        # Need to be defined as edges if zmin can be close to z=0
        self.zmin += self.dz/2.
        self.zmax -= self.dz/2.

        # self.z_mean will contain the central values
        self.z_mean = np.linspace(self.zmin, self.zmax, num=self.nbin)

        # Store the total vector z, with edges + mean
        self.z = np.linspace(
            self.zmin-self.dz/2., self.zmax+self.dz/2.,
            num=2*self.nbin+1)

        # Compute the number of galaxies for each \bar z
        # N_g(\bar z) = int_{\bar z - dz/2}^{\bar z + dz/2} dn/dz dz
        # dn/dz fit formula from 1412.4700v2: 10^c1 * z^c2 * e^(-c3*z)
        # dn/dz = number of galaxies per redshift and deg^2
        self.N_g = np.zeros(self.nbin)
        N_tot = 0.0
        for index_z in xrange(self.nbin):
            self.N_g[index_z], error = scipy.integrate.quad(
                self.dndz,
                self.z_mean[index_z]-self.dz/2.,
                self.z_mean[index_z]+self.dz/2.)
            assert error/self.N_g[index_z] <= 0.001, (
                "dndz integration error is bigger than 0.1%")
            N_tot += self.N_g[index_z]

        # Ntot output
        #print("\nSKA2: Number of detected galaxies and bias in each redshift bin:")
        #for index_z in xrange(self.nbin):
        #    print("z-bin[" + str(self.z_mean[index_z]-self.dz/2.) + "," + str(self.z_mean[index_z]+self.dz/2.) + "]: \tN = %.4g" % (self.N_g[index_z]) + " ,\t b = %.4g" % (b[index_z]))
        #print("Total number of detected galaxies: N = %.4g\n" % (N_tot))

        # Define the k values for the integration (from kmin to kmax), at which
        # the spectrum will be computed (and stored for the fiducial model)
        self.k_fid = np.logspace(
            log10(self.kmin), log10(self.k_cut(self.zmax)), num=self.k_size)

        # Define the mu scale
        self.mu_fid = np.linspace(-1, 1, self.mu_size)

        # If the file exists, initialize the fiducial values, the spectrum will
        # be read first, with k_size values of k and nbin values of z. Then,
        # H_fid and D_A fid will be read (each with nbin values).
        # Then V_fid, b_fid and the fiducial errors on real space coordinates follow.
        self.fid_values_exist = False
        self.pk_nl_fid = np.zeros((self.k_size, 2*self.nbin+1), 'float64')
        if self.use_linear_rsd:
            self.pk_lin_fid = np.zeros((self.k_size, 2*self.nbin+1), 'float64')
        self.H_fid = np.zeros(2*self.nbin+1, 'float64')
        self.D_A_fid = np.zeros(2*self.nbin+1, 'float64')
        self.V_fid = np.zeros(self.nbin, 'float64')
        self.b_fid = np.zeros(self.nbin, 'float64')
        self.sigma_A_fid = np.zeros(self.nbin, 'float64')
        self.sigma_B_fid = np.zeros(self.nbin, 'float64')

        fid_file_path = os.path.join(self.data_directory, self.fiducial_file)
        if os.path.exists(fid_file_path):
            self.fid_values_exist = True
            with open(fid_file_path, 'r') as fid_file:
                line = fid_file.readline()
                while line.find('#') != -1:
                    line = fid_file.readline()
                while (line.find('\n') != -1 and len(line) == 1):
                    line = fid_file.readline()
                for index_k in xrange(self.k_size):
                    for index_z in xrange(2*self.nbin+1):
                        if self.use_linear_rsd:
                            self.pk_nl_fid[index_k, index_z] = float(line.split()[0])
                            self.pk_lin_fid[index_k, index_z] = float(line.split()[1])
                        else:
                            self.pk_nl_fid[index_k, index_z] = float(line)
                        line = fid_file.readline()
                for index_z in xrange(2*self.nbin+1):
                    self.H_fid[index_z] = float(line.split()[0])
                    self.D_A_fid[index_z] = float(line.split()[1])
                    line = fid_file.readline()
                for index_z in xrange(self.nbin):
                    self.V_fid[index_z] = float(line.split()[0])
                    self.b_fid[index_z] = float(line.split()[1])
                    line = fid_file.readline()
                for index_z in xrange(self.nbin):
                    self.sigma_A_fid[index_z] = float(line.split()[0])
                    self.sigma_B_fid[index_z] = float(line.split()[1])
                    line = fid_file.readline()
                self.sigma_NL_fid = float(line)

        # Else the file will be created in the loglkl() function.
        return
Code Example #41
File: __init__.py Project: B-Rich/montepython_public
  def __init__(self, path, data, command_line):

    Likelihood.__init__(self, path, data, command_line)

    self.need_cosmo_arguments(data, {'output':'mPk'})
    
    #################
    # find number of galaxies for each mean redshift value
    #################

    # Compute the number of galaxies for each \bar z
    # For this, one needs dn/dz TODO
    # then n_g(\bar z) = int_{\bar z - dz/2}^{\bar z + dz/2} dn/dz dz
    # self.z_mean will contain the central values

    self.z_mean = np.zeros(self.nbin,'float64')

    # Deduce the dz step from the number of bins and the edge values of z
    self.dz = (self.zmax-self.zmin)/(self.nbin-1.)
    i=0
    for z in np.arange(self.zmin,self.zmax+self.dz,self.dz):
      self.z_mean[i] = z
      i+=1

    # Store the z edge values
    self.z_edges = np.zeros(self.nbin+1,'float64')
    i = 0
    for z in np.arange(self.zmin-self.dz/2.,self.zmax+self.dz,self.dz):
      self.z_edges[i] = z
      i+=1

    # Store the total vector z, with edges + mean
    self.z = np.zeros(2*self.nbin+1,'float64')
    i=0
    for z in np.arange(self.zmin-self.dz/2.,self.zmax+self.dz,self.dz/2.):
      self.z[i] = z
      i+=1

    self.need_cosmo_arguments(data,{'z_max_pk':self.z[-1]})

    # For each bin, compute the bias function,
    self.b = np.zeros(self.nbin,'float64')
    for Bin in range(self.nbin):
      self.b[Bin] = sqrt(self.z_mean[Bin]+1.)

    # Force the cosmological module to store Pk for k up to an arbitrary number
    # (since self.r is not yet decided)... TODO
    self.need_cosmo_arguments(data,{'P_k_max_1/Mpc':1.5*self.kmax})

    # Define the k values for the integration (from kmin to kmax), at which the
    # spectrum will be computed (and stored for the fiducial model)
    # k_size is deeply arbitrary here, TODO
    
    self.k_fid = np.zeros(self.k_size,'float64')
    for i in range(self.k_size):
      self.k_fid[i] = exp( i*1.0 /(self.k_size-1) * log(self.kmax/self.kmin) + log(self.kmin))
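    # (equivalent to the np.logspace call used for k_fid in Code Example #38
    # above)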

    ################
    # Noise spectrum TODO properly
    ################

    self.n_g = np.zeros(self.nbin,'float64')
    #for index_z in range(self.nbin):
      #print self.z_mean[index_z],self.z[2*index_z],self.z[2*index_z+2]
      #self.n_g[index_z] = self.galaxy_distribution(self.z[2*index_z+2]) - self.galaxy_distribution(self.z[2*index_z])
    #print self.n_g

    self.n_g[0] = 6844.945
    self.n_g[1] = 7129.45
    self.n_g[2] = 7249.912
    self.n_g[3] = 7261.722
    self.n_g[4] = 7203.825
    self.n_g[5] = 7103.047
    self.n_g[6] = 6977.571
    self.n_g[7] = 6839.546
    self.n_g[8] = 6696.957
    self.n_g[9] = 5496.988
    self.n_g[10] = 4459.240
    self.n_g[11] = 3577.143
    self.n_g[12] = 2838.767
    self.n_g[13] = 2229.282
    self.n_g[14] = 1732.706
    self.n_g[15] = 1333.091

    self.n_g = self.n_g * self.efficiency * 41253.

    # If the file exists, initialize the fiducial values,
    # the spectrum will be read first, with k_size values of k and nbin values
    # of z. Then, H_fid and D_A fid will be read (each with nbin values).
    #self.Cl_fid = np.zeros((self.nlmax,self.nbin,self.nbin),'float64')
    self.fid_values_exist = False
    self.pk_nl_fid = np.zeros((self.k_size,2*self.nbin+1),'float64')
    self.H_fid       = np.zeros(2*self.nbin+1,'float64')
    self.D_A_fid     = np.zeros(2*self.nbin+1,'float64') 
    self.sigma_r_fid = np.zeros(self.nbin,'float64')

    if os.path.exists(self.data_directory+'/'+self.fiducial_file):
      self.fid_values_exist = True
      fid_file = open(os.path.join(
          self.data_directory, self.fiducial_file), 'r')
      line = fid_file.readline()
      while line.find('#') != -1:
        line = fid_file.readline()
      while (line.find('\n') != -1 and len(line) == 1):
        line = fid_file.readline()
      for index_k in range(self.k_size):
        for index_z in range(2*self.nbin+1):
          self.pk_nl_fid[index_k, index_z] = float(line)
          line = fid_file.readline()
      for index_z in range(2*self.nbin+1):
        self.H_fid[index_z] = float(line.split()[0])
        self.D_A_fid[index_z] = float(line.split()[1])
        line = fid_file.readline()
      for index_z in range(self.nbin):
        self.sigma_r_fid[index_z] = float(line)
        line = fid_file.readline()
      fid_file.close()
      
    # Else the file will be created in the loglkl() function. 
    return
Code Example #42
File: __init__.py Project: B-Rich/montepython_public
  def __init__(self, path, data, command_line):

    Likelihood.__init__(self, path, data, command_line)

    self.need_cosmo_arguments(data,{'output':'mPk'})

    # Define array of l values, and initialize them
    self.l = np.zeros(self.nlmax,'float64')
    for nl in range(self.nlmax):
      self.l[nl] = 1.*math.exp(self.dlnl*nl)
    
    #print self.l[:]
    #exit()

    ########################################################
    # Find distribution of dn_dz (not normalized) in each bin
    ########################################################
    
    # Assuming each bin contains the same number of galaxies, we find the bin
    # limits in z space

    # Compute the total number of galaxies until zmax (no normalization yet)

    n_tot = 0.
    for z in np.arange(0,self.zmax+self.dz,self.dz):
      gd_1 = self.galaxy_distribution(z)
      gd_2 = self.galaxy_distribution(z+self.dz)
      n_tot += 0.5*(gd_1+gd_2)*self.dz
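    # (simple trapezoidal rule with step dz; n_tot is only used below via the
    # per-bin target n_tot/self.nbin, so its overall normalization cancels)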

    # For each bin, compute the limit in z space
  
    # Create the array that will contain the z boundaries for each bin. The
    # first value is already correctly set to 0.
    self.z_bin_edge = np.zeros(self.nbin+1,'float64')

    for Bin in range(self.nbin-1):

      bin_count = 0.
      z = self.z_bin_edge[Bin]

      while (bin_count <= n_tot/self.nbin):
        gd_1 = self.galaxy_distribution(z)
        gd_2 = self.galaxy_distribution(z+self.dz)
        bin_count += 0.5*(gd_1+gd_2)*self.dz
        z += self.dz

      self.z_bin_edge[Bin+1] = z

    self.z_bin_edge[self.nbin] = self.zmax
    
    # Fill array of discrete z values
    self.z = np.zeros(self.nzmax,'float64')
    for nz in range(self.nzmax):
      self.z[nz] = (nz*1.0)/(self.nzmax-1.0)*self.zmax

    # Force the cosmological module to store Pk for redshifts up to max(self.z)
    self.need_cosmo_arguments(data,{'z_max_pk':self.z[-1]})
    # Force the cosmological module to store Pk for k up to an arbitrary number
    # (since self.r is not yet decided)... TODO
    self.need_cosmo_arguments(data,{'P_k_max_1/Mpc':self.k_max})

    # Fill distribution for each bin (convolving with photo_z distribution)
    self.eta_z = np.zeros((self.nzmax,self.nbin),'float64')
    for Bin in range(self.nbin):
      for nz in range(self.nzmax):
        z = self.z[nz]
        self.eta_z[nz, Bin] = 0.
        for nz2 in range(1, self.nzmax):
          if ((self.z[nz2] >= self.z_bin_edge[Bin]) and (self.z[nz2] <= self.z_bin_edge[Bin+1])):
            gd = self.galaxy_distribution(self.z[nz2])
            pzd = self.photo_z_distribution(z, self.z[nz2])
            integrand_plus = gd*pzd
          else:
            integrand_plus = 0.
          if ((self.z[nz2-1] >= self.z_bin_edge[Bin]) and (self.z[nz2-1] <= self.z_bin_edge[Bin+1])):
            gd = self.galaxy_distribution(self.z[nz2-1])
            pzd = self.photo_z_distribution(z, self.z[nz2-1])
            integrand_minus = gd*pzd
          else:
            integrand_minus = 0.
          self.eta_z[nz, Bin] += 0.5*(integrand_plus+integrand_minus)*(self.z[nz2]-self.z[nz2-1])

    #for nz in range(self.nzmax):
    #  print self.z[nz],self.eta_z[nz,0],self.eta_z[nz,1],self.eta_z[nz,2],self.eta_z[nz,3],self.eta_z[nz,4],self.galaxy_distribution(self.z[nz])
    #exit()  

    # integrate eta(z) over z (in view of normalizing it to one)
    self.eta_norm = np.zeros(self.nbin,'float64')
    for Bin in range(self.nbin):
      self.eta_norm[Bin] = np.sum(0.5*(self.eta_z[1:,Bin]+self.eta_z[:-1,Bin])*(self.z[1:]-self.z[:-1]))

    ################
    # Noise spectrum 
    ################
    
    # Number of galaxies per steradian
    self.noise = 3600.*self.gal_per_sqarcmn*(180./math.pi)**2

    # Number of galaxies per steradian per bin
    self.noise = self.noise/self.nbin
    
    # Noise spectrum (diagonal in bin*bin space, independent of l and Bin)
    self.noise = self.rms_shear**2/self.noise
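    # i.e. noise = rms_shear**2 * nbin / n_per_steradian: the standard
    # shape-noise power for nbin equally populated bins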

    # TEST
    #self.noise = 0 
    
    
    ###########
    # Read data
    ###########

    # If the file exists, initialize the fiducial values
    self.Cl_fid = np.zeros((self.nlmax,self.nbin,self.nbin),'float64')
    self.fid_values_exist = False
    if os.path.exists(self.data_directory+'/'+self.fiducial_file):
      self.fid_values_exist = True
      fid_file = open(os.path.join(
          self.data_directory, self.fiducial_file), 'r')
      line = fid_file.readline()
      while line.find('#') != -1:
        line = fid_file.readline()
      while (line.find('\n') != -1 and len(line) == 1):
        line = fid_file.readline()
      for nl in range(self.nlmax):
        for Bin in range(self.nbin):
          for Bin2 in range(self.nbin):
            self.Cl_fid[nl, Bin, Bin2] = float(line)
            line = fid_file.readline()
      fid_file.close()
      
    # Else the file will be created in the loglkl() function. 
    return
Code Example #43
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # Force the cosmological module to store Pk for redshifts up to
        # max(self.z) and for k up to k_max
        # Technically we shouldn't need cosmo 2 to get Pk, but omitting it might trigger a bug
        self.need_cosmo1_arguments(data, {'output': 'mPk', 'P_k_max_h/Mpc': self.k_max_h_by_Mpc})
        self.need_cosmo2_arguments(data, {'output': 'mPk', 'P_k_max_h/Mpc': self.k_max_h_by_Mpc})
        self.need_cosmo1_arguments(data, {'nonlinear_min_k_max': self.nonlinear_min_k_max})
        self.need_cosmo2_arguments(data, {'nonlinear_min_k_max': self.nonlinear_min_k_max})
        ## Compute non-linear power spectrum if requested
        # it seems like HMcode needs the full argument to work...
        if self.method_non_linear_Pk in ['halofit', 'HALOFIT', 'Halofit', 'hmcode', 'Hmcode', 'HMcode', 'HMCODE']:
            self.need_cosmo1_arguments(data, {'non linear': self.method_non_linear_Pk})
            self.need_cosmo2_arguments(data, {'non linear': self.method_non_linear_Pk})
            print('Using {:} to obtain the non-linear P(k, z)!'.format(self.method_non_linear_Pk))
        else:
            print('Only using the linear P(k, z) for ALL calculations \n (check keywords for "method_non_linear_Pk").')

        # set up array of ells for Cl integrations:
        self.ells = np.logspace(np.log10(self.ell_min), np.log10(self.ell_max), self.nells)

        self.config_xip_binned1 = {'xip_binned1': { 'output_section_name' : self.xip_output_section_name,
                                          'input_section_name' : self.xip_input_section_name,
                                          'type' : self.xip_type,
                                          'theta_min' : self.xip_theta_min,
                                          'theta_max' : self.xip_theta_max,
                                          'nTheta' : self.xip_nTheta,
                                          'weighted_binning' : self.xip_weighted_binning,
                                          'InputNpair' : self.xip_InputNpair,
                                          'InputNpair_suffix' : self.xip_InputNpair_suffix,
                                          'Column_theta' : self.xip_Column_theta,
                                          'Column_Npair' : self.xip_Column_Npair,
                                          'nBins_in' : self.xip_nBins_in,
                                          'add_2D_cterm' : self.xip_add_2D_cterm,
                                          'add_c_term' :self.xip_add_c_term,
                                          }}
        self.xip_binned_module1 = cosmosis.runtime.module.Module(module_name='xip_binned1',
                                            file_path=os.path.join(self.kcap_directory,
                                            'cosebis/libxipm_binned.so'))

        self.xip_binned_module1.setup(dict_to_datablock(self.config_xip_binned1))

        self.config_xim_binned1 = {'xim_binned1': { 'output_section_name' : self.xim_output_section_name,
                                          'input_section_name' : self.xim_input_section_name,
                                          'type' : self.xim_type,
                                          'theta_min' : self.xim_theta_min,
                                          'theta_max' : self.xim_theta_max,
                                          'nTheta' : self.xim_nTheta,
                                          'weighted_binning' : self.xim_weighted_binning,
                                          'InputNpair' : self.xim_InputNpair,
                                          'InputNpair_suffix' : self.xim_InputNpair_suffix,
                                          'Column_theta' : self.xim_Column_theta,
                                          'Column_Npair' : self.xim_Column_Npair,
                                          'nBins_in' : self.xim_nBins_in,
                                          'add_2D_cterm' : self.xim_add_2D_cterm,
                                          'add_c_term' :self.xim_add_c_term,
                                          }}
        self.xim_binned_module1 = cosmosis.runtime.module.Module(module_name='xim_binned1',
                                            file_path=os.path.join(self.kcap_directory,
                                            'cosebis/libxipm_binned.so'))
        self.xim_binned_module1.setup(dict_to_datablock(self.config_xim_binned1))

        # self.config_xip_binned2 = {'xip_binned2': { 'output_section_name' : self.xip_output_section_name,
        #                                   'input_section_name' : self.xip_input_section_name,
        #                                   'type' : self.xip_type,
        #                                   'theta_min' : self.xip_theta_min,
        #                                   'theta_max' : self.xip_theta_max,
        #                                   'nTheta' : self.xip_nTheta,
        #                                   'weighted_binning' : self.xip_weighted_binning,
        #                                   'InputNpair' : self.xip_InputNpair,
        #                                   'InputNpair_suffix' : self.xip_InputNpair_suffix,
        #                                   'Column_theta' : self.xip_Column_theta,
        #                                   'Column_Npair' : self.xip_Column_Npair,
        #                                   'nBins_in' : self.xip_nBins_in,
        #                                   'add_2D_cterm' : self.xip_add_2D_cterm,
        #                                   'add_c_term' :self.xip_add_c_term}}
        # self.xip_binned_module2 = cosmosis.runtime.module.Module(module_name='xip_binned2',
        #                                     file_path=os.path.join(self.kcap_directory,
        #                                     'cosebis/libxipm_binned.so'))

        # self.xip_binned_module2.setup(dict_to_datablock(self.config_xip_binned2))
        #
        # self.config_xim_binned2 = {'xim_binned2': { 'output_section_name' : self.xim_output_section_name,
        #                                   'input_section_name' : self.xim_input_section_name,
        #                                   'type' : self.xim_type,
        #                                   'theta_min' : self.xim_theta_min,
        #                                   'theta_max' : self.xim_theta_max,
        #                                   'nTheta' : self.xim_nTheta,
        #                                   'weighted_binning' : self.xim_weighted_binning,
        #                                   'InputNpair' : self.xim_InputNpair,
        #                                   'InputNpair_suffix' : self.xim_InputNpair_suffix,
        #                                   'Column_theta' : self.xim_Column_theta,
        #                                   'Column_Npair' : self.xim_Column_Npair,
        #                                   'nBins_in' : self.xim_nBins_in,
        #                                   'add_2D_cterm' : self.xim_add_2D_cterm,
        #                                   'add_c_term' :self.xim_add_c_term}}
        # self.xim_binned_module2 = cosmosis.runtime.module.Module(module_name='xim_binned2',
        #                                     file_path=os.path.join(self.kcap_directory,
        #                                     'cosebis/libxipm_binned.so'))
        # self.xim_binned_module2.setup(dict_to_datablock(self.config_xim_binned2))

        # set up KCAP's scale cuts module here:
        # Initialize the scale cuts module from CosmoSIS:
        self.config_scale_cuts1= {'scale_cuts1': {'data_and_covariance_fits_filename': os.path.join(self.data_directory, self.data_file),
                                                 #'scale_cuts_option': self.scale_cuts_option1,
                                                 'use_stats': 'xiP xiM',
                                                 'xi_plus_extension_name' : 'xiP',
                                                 'xi_minus_extension_name' : 'xiM',
                                                 'xi_plus_section_name' : 'shear_xi_plus_binned',
                                                 'xi_minus_section_name' : 'shear_xi_minus_binned',
                                                 'cosebis_section_name' : 'cosebis',
                                                 'simulate' : False,
                                                 'simulate_with_noise' : True,
                                                 'output_section_name': 'scale_cuts_output',
                                                 }}

        # for now we only look for these keywords:
        if hasattr(self, 'keep_ang_xiP1'):
            self.config_scale_cuts1['scale_cuts1'].update({'keep_ang_xiP': self.keep_ang_xiP1})
        if hasattr(self, 'cut_pair_xiP1'):
            self.config_scale_cuts1['scale_cuts1'].update({'cut_pair_xiP': self.cut_pair_xiP1})
        if hasattr(self, 'keep_ang_xiM1'):
            self.config_scale_cuts1['scale_cuts1'].update({'keep_ang_xiM': self.keep_ang_xiM1})
        if hasattr(self, 'cut_pair_xiM1'):
            self.config_scale_cuts1['scale_cuts1'].update({'cut_pair_xiM': self.cut_pair_xiM1})

        # import scale_cuts as CosmoSIS module
        self.scale_cuts_module1 = cosmosis.runtime.module.Module(module_name='scale_cuts1',
                                            file_path=os.path.join(self.kcap_directory,
                                            'modules/scale_cuts/scale_cuts.py'))

        # during set up the module stores the cut data vec and covmat in its data
        # attribute
        self.scale_cuts_module1.setup(dict_to_datablock(self.config_scale_cuts1))

        # self.config_scale_cuts2= {'scale_cuts2': {'data_and_covariance_fits_filename': os.path.join(self.data_directory, self.data_file),
        #                                          #'scale_cuts_option': self.scale_cuts_option1,
        #                                          'use_stats': 'xiP xiM',
        #                                          'xi_plus_extension_name' : 'xiP',
        #                                          'xi_minus_extension_name' : 'xiM',
        #                                          'xi_plus_section_name' : 'shear_xi_plus_binned',
        #                                          'xi_minus_section_name' : 'shear_xi_minus_binned',
        #                                          'cosebis_section_name' : 'cosebis',
        #                                          'simulate' : False,
        #                                          'simulate_with_noise' : True,
        #                                          'output_section_name': 'scale_cuts_output'}}

        # for now we only look for these two keywords:
        # if hasattr(self, 'keep_ang_xiP2'):
        #     self.config_scale_cuts2['scale_cuts2'].update({'keep_ang_xiP': self.keep_ang_xiP2})
        # if hasattr(self, 'cut_pair_xiP2'):
        #     self.config_scale_cuts2['scale_cuts2'].update({'cut_pair_xiP': self.cut_pair_xiP2})
        # if hasattr(self, 'keep_ang_xiM2'):
        #     self.config_scale_cuts2['scale_cuts2'].update({'keep_ang_xiM': self.keep_ang_xiM2})
        # if hasattr(self, 'cut_pair_xiM2'):
        #     self.config_scale_cuts2['scale_cuts2'].update({'cut_pair_xiM': self.cut_pair_xiM2})

        # import scale_cuts as CosmoSIS module
        # self.scale_cuts_module2 = cosmosis.runtime.module.Module(module_name='scale_cuts2',
        #                                     file_path=os.path.join(self.kcap_directory,
        #                                     'modules/scale_cuts/scale_cuts.py'))

        # during set up the module stores the cut data vec and covmat in its data
        # attribute
        # self.scale_cuts_module2.setup(dict_to_datablock(self.config_scale_cuts2))

        # retrieve the cut data vector and covariance from the scale cuts module:
        self.data_vec = self.scale_cuts_module1.data['data']
        covmat = self.scale_cuts_module1.data['covariance']
        #print(self.cosebis_obs1.shape)
        # we don't need the covmat_block
        #covmat_block1 = self.scale_cuts_module1.data['covariance']
        #print(covmat_block1.shape)

        # self.data_vec2 = self.scale_cuts_module2.data['data']
        #print(self.cosebis_obs2.shape)
        # we don't need the covmat_block
        #covmat_block2 = self.scale_cuts_module2.data['covariance']
        #print(covmat_block2.shape)

        # concatenate to one data vector:
        # self.data_vec = np.concatenate((self.data_vec1, self.data_vec2))
        #print(self.cosebis_obs.shape)

        # the approach below does NOT work, as we would be missing the cross blocks!!!
        # build a combined covmat, for that to work we assume, that the cov-mat dimension fits
        # to the size of the *uncut*, single data-vector and is ordered in the same way as the
        # *final* data-vector created here!

        # bmat can't deal with empty blocks...
        '''
        if covmat_block2.size != 0:
            covmat = np.asarray(np.bmat('covmat_block1, covmat_block2; covmat_block1, covmat_block2'))
        else:
            covmat = covmat_block1
        '''

        # Read dn_dz from data FITS file:
        #z_samples, hist_samples = self.__load_legacy_nofz()
        # in the process we also set self.nzbins!
        data_vec_uncut, covmat_uncut, self.z_samples, self.hist_samples = self.load_data_file()

        # instead we infer a masking array by comparing the uncut data vector
        # to the cut data vectors and apply the mask then to a stacked block
        # matrix for which each block consists of an uncut covmat
        # mask1 = self.get_mask(data_vec_uncut, self.data_vec)
        # mask2 = self.get_mask(data_vec_uncut, self.data_vec2)
        # mask_indices = np.where(np.concatenate((mask1, mask2)) == 1)[0]
        # mask_indices = np.where(np.concatenate(mask1) == 1)[0]
        # covmat = covmat_uncut[np.ix_(mask_indices, mask_indices)]
        # covmat = np.bmat([[covmat_uncut, covmat_uncut],[covmat_uncut, covmat_uncut]])
        # covmat = covmat[np.ix_(mask_indices, mask_indices)]
        #print(covmat.shape)

        # precompute Cholesky transform for chi^2 calculation:
        self.cholesky_transform = cholesky(covmat, lower=True)
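        # A hedged sketch of the chi^2 evaluation this enables in loglkl()
        # (scipy.linalg.solve_triangular; 'diff' = data minus theory vector):
        #   y = solve_triangular(self.cholesky_transform, diff, lower=True)
        #   chi2 = y.dot(y)
        # which equals diff^T C^-1 diff without inverting C explicitly.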

        # Check if any of the n(z) needs to be shifted in loglkl by D_z{1...n}:
        self.shift_n_z_by_D_z = np.zeros((1, self.nzbins), 'bool')
        for zbin in xrange(self.nzbins):
            param_name = 'D_z{:}'.format(zbin + 1)
            if param_name in data.mcmc_parameters:
                self.shift_n_z_by_D_z[0, zbin] = True
            # param_name = 'D_z{:}_2'.format(zbin + 1)
            # if param_name in data.mcmc_parameters:
            #     self.shift_n_z_by_D_z[1, zbin] = True

        if self.shift_n_z_by_D_z[0, :].any():
            # load the correlation matrix of the D_z shifts:
            try:
                fname = os.path.join(self.data_directory, self.filename_corrmat_D_z)
                corrmat_D_z = np.loadtxt(fname)
                print('Loaded correlation matrix for D_z<i>_1 shifts from: \n {:} \n'.format(fname))
                self.L_matrix_D_z = np.linalg.cholesky(corrmat_D_z)
            except:
                print('Could not load correlation matrix of D_z<i>_1 shifts, hence treating them as independent! \n')
                self.L_matrix_D_z = np.eye(self.nzbins)

        # if self.shift_n_z_by_D_z[1, :].any():
        #     # load the correlation matrix of the D_z shifts:
        #     try:
        #         fname = os.path.join(self.data_directory, self.filename_corrmat_D_z_2)
        #         corrmat_D_z_2 = np.loadtxt(fname)
        #         print('Loaded correlation matrix for D_z<i>_2 shifts from: \n {:} \n'.format(fname))
        #         self.L_matrix_D_z_2 = np.linalg.cholesky(corrmat_D_z_2)
        #     except:
        #         print('Could not load correlation matrix of D_z<i>_2 shifts, hence treating them as independent! \n')
        #         self.L_matrix_D_z_2 = np.eye(self.nzbins)

        # prevent undersampling of the histograms!
        # note: the histogram resolution is the second dimension of z_samples,
        # hence the comparison against self.z_samples.shape[1]
        if self.nzmax < self.z_samples.shape[1] - 1:
            raise io_mp.LikelihoodError(
                "You're trying to integrate at a lower resolution than "
                "supplied by the n(z) histograms. Increase nzmax!")
        # if nzmax matches the histogram resolution, integrate at that
        # resolution and account for the extra zero entry added
        elif self.nzmax == self.z_samples.shape[1] - 1:
            self.nzmax = self.z_samples.shape[1]
            # requires that z-spacing is always the same for all bins...
            self.z_p = self.z_samples[0, :]
            print('Integrations performed at resolution of histogram!')
        # if we interpolate anyway at arbitrary resolution the extra 0 doesn't matter
        else:
            self.nzmax += 1
            self.z_p = np.linspace(self.z_samples.min(), self.z_samples.max(), self.nzmax)
            print('Integration performed at set nzmax resolution!')
        if self.z_p[0] == 0:
            self.z_p[0] = 0.0001
        self.pz = np.zeros((self.nzmax, self.nzbins))
        self.pz_norm = np.zeros(self.nzbins, 'float64')
        self.splines_pz = []
        for zbin in xrange(self.nzbins):
            # we assume that the z-spacing is the same for each histogram
            spline_pz = itp.interp1d(self.z_samples[zbin, :], self.hist_samples[zbin, :], kind=self.type_redshift_interp)
            self.splines_pz.append(spline_pz)
            mask_min = self.z_p >= self.z_samples[zbin, :].min()
            mask_max = self.z_p <= self.z_samples[zbin, :].max()
            mask = mask_min & mask_max
            # points outside the z-range of the histograms are set to 0!
            self.pz[mask, zbin] = spline_pz(self.z_p[mask])
            # normalize the selection functions with the trapezoid rule
            dz = self.z_p[1:] - self.z_p[:-1]
            self.pz_norm[zbin] = np.sum(0.5 * (self.pz[1:, zbin] + self.pz[:-1, zbin]) * dz)
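        # (illustrative note, not from the original source) the sum above is
        # the trapezoid rule on a possibly non-uniform grid; it should agree
        # with:
        #   self.pz_norm[zbin] = np.trapz(self.pz[:, zbin], self.z_p)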

        self.zmax = self.z_p.max()
        self.need_cosmo1_arguments(data, {'z_max_pk': self.zmax})
        # Technically not needed
        self.need_cosmo2_arguments(data, {'z_max_pk': self.zmax})

        # Initialize the BandPowers module from CosmoSIS:
        config_theory = {'cl2xi': {'corr_type': 0,
                                   #'input_section_name' : 'shear_cl',
                                   #'output_section_name' : 'shear_xi',
                                   #'sample_a' : 'xiP',
                                   #'sample_b' : 'xiM'
                                    }}


        # needs to point to one of the compiled '.so' modules inside the kcap checkout:
        self.theory_module = cosmosis.runtime.module.Module(module_name='cl2xi',
                                            file_path=os.path.join(self.kcap_directory,
                                            'cosmosis-standard-library/shear/cl_to_xi_nicaea/nicaea_interface.so'))

        self.theory_module.setup(dict_to_datablock(config_theory))
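        # (illustrative sketch, not from the original source) in loglkl() the
        # configured module is then executed on a datablock holding the theory
        # C_l's, presumably along the lines of:
        #   self.theory_module.execute(dict_to_datablock(theory_input))
        # where theory_input is a placeholder for the dict of computed spectra.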
        return
Code example #44
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # Force the cosmological module to store Pk for redshifts up to
        # max(self.z) and for k up to k_max
        self.need_cosmo_arguments(data, {"output": "mPk"})
        self.need_cosmo_arguments(data, {"z_max_pk": self.zmax})
        self.need_cosmo_arguments(data, {"P_k_max_h/Mpc": self.k_max_h_by_Mpc})

        # Compute non-linear power spectrum if requested
        if self.use_halofit:
            self.need_cosmo_arguments(data, {"non linear": "halofit"})

        # Define array of l values, and initialize them
        # It is a logspace
        # find nlmax in order to reach lmax with logarithmic steps dlnl
        self.nlmax = int(np.log(self.lmax / self.lmin) / self.dlnl) + 1
        # redefine slightly dlnl so that the last point is always exactly lmax
        self.dlnl = np.log(self.lmax / self.lmin) / (self.nlmax - 1)
        self.l = self.lmin * np.exp(self.dlnl * np.arange(self.nlmax))

        ########################################################
        # Find distribution of dn_dz (not normalized) in each bin
        ########################################################
        # Assuming each bin contains the same number of galaxies, we find the
        # bin limits in z space. First compute the total number of galaxies up
        # to zmax (no normalization yet), i.e. the integral of the galaxy
        # distribution function from 0 to self.zmax
        n_tot, error = scipy.integrate.quad(self.galaxy_distribution, 0, self.zmax)
        assert error <= 1e-7, "The integration of the galaxy distribution is not as " "precise as expected."

        # For each bin, compute the limit in z space

        # Create the array that will contain the z boundaries for each bin. The
        # first value is already correctly set to 0.
        self.z_bin_edge = np.zeros(self.nbin + 1, "float64")

        for Bin in xrange(self.nbin - 1):
            bin_count = 0.0
            z = self.z_bin_edge[Bin]
            while bin_count <= n_tot / self.nbin:
                gd_1 = self.galaxy_distribution(z)
                gd_2 = self.galaxy_distribution(z + self.dz)
                bin_count += 0.5 * (gd_1 + gd_2) * self.dz
                z += self.dz
            self.z_bin_edge[Bin + 1] = z
        self.z_bin_edge[self.nbin] = self.zmax
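        # (illustrative sketch, not from the original source) the same edges
        # can be found from the cumulative integral of dn/dz, e.g.:
        #   z_grid = np.arange(0.0, self.zmax + self.dz, self.dz)
        #   n_of_z = np.array([self.galaxy_distribution(z) for z in z_grid])
        #   cum = scipy.integrate.cumtrapz(n_of_z, z_grid, initial=0)
        #   edges = np.interp(np.linspace(0.0, cum[-1], self.nbin + 1), cum, z_grid)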

        # Fill array of discrete z values
        self.z = np.linspace(0, self.zmax, num=self.nzmax)

        # Fill distribution for each bin (convolving with photo_z distribution)
        self.eta_z = np.zeros((self.nzmax, self.nbin), "float64")
        gal = self.galaxy_distribution(self.z, True)
        for Bin in xrange(self.nbin):
            low = self.z_bin_edge[Bin]
            hig = self.z_bin_edge[Bin + 1]
            # keep only the part of dn/dz that falls inside the bin limits
            in_bin = (self.z >= low) & (self.z <= hig)
            for nz in xrange(self.nzmax):
                z = self.z[nz]
                integrand = np.where(in_bin, gal * self.photo_z_distribution(z, self.z, True), 0.0)
                self.eta_z[nz, Bin] = scipy.integrate.trapz(integrand, self.z)

        # integrate eta(z) over z (in view of normalizing it to one)
        self.eta_norm = np.zeros(self.nbin, "float64")
        for Bin in xrange(self.nbin):
            self.eta_norm[Bin] = np.sum(0.5 * (self.eta_z[1:, Bin] + self.eta_z[:-1, Bin]) * (self.z[1:] - self.z[:-1]))

        ################
        # Noise spectrum
        ################

        # Number of galaxies per steradian
        self.noise = 3600.0 * self.gal_per_sqarcmn * (180.0 / math.pi) ** 2

        # Number of galaxies per steradian per bin
        self.noise = self.noise / self.nbin

        # Noise spectrum (diagonal in bin*bin space, independent of l and Bin)
        self.noise = self.rms_shear ** 2 / self.noise
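        # (illustrative note, not from the original source) unit check:
        # 1 rad = (180 / pi) deg = 60 * (180 / pi) arcmin, so
        #   gal / sr = gal_per_sqarcmn * (60 * 180 / pi)**2
        #            = gal_per_sqarcmn * 3600 * (180 / pi)**2,
        # which is the prefactor used above.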

        ###########
        # Read data
        ###########

        # If the file exists, initialize the fiducial values
        # It has been stored flat, so we use the reshape function to put it in
        # the right shape.
        self.Cl_fid = np.zeros((self.nlmax, self.nbin, self.nbin), "float64")
        self.fid_values_exist = False
        fid_file_path = os.path.join(self.data_directory, self.fiducial_file)
        if os.path.exists(fid_file_path):
            self.fid_values_exist = True
            flat_Cl = np.loadtxt(fid_file_path)
            self.Cl_fid = flat_Cl.reshape((self.nlmax, self.nbin, self.nbin))

        return
Code example #45
File: __init__.py Project: PoulinV/montepython
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        self.need_cosmo_arguments(data, {'output': 'mPk'})
        self.need_cosmo_arguments(data, {'z_max_pk': self.zmax})
        self.need_cosmo_arguments(data, {'P_k_max_h/Mpc': 1.9 * self.kmax})

        #################
        # find number of galaxies for each mean redshift value
        #################

        # Deduce the dz step from the number of bins and the edge values of z
        self.dz = (self.zmax - self.zmin) / (self.nbin - 1.)

        # Compute the number of galaxies for each \bar z
        # For this, one needs dn/dz TODO
        # then n_g(\bar z) = int_{\bar z - dz/2}^{\bar z + dz/2} dn/dz dz

        # self.z_mean will contain the central values
        self.z_mean = np.linspace(self.zmin, self.zmax, num=self.nbin)

        # Store the z edge values
        self.z_edges = np.linspace(self.zmin - self.dz / 2.,
                                   self.zmax + self.dz / 2,
                                   num=self.nbin + 1)

        # Store the total vector z, with edges + mean
        self.z = np.linspace(self.zmin - self.dz / 2.,
                             self.zmax + self.dz / 2.,
                             num=2 * self.nbin + 1)

        # At the center of each bin, compute the bias function, simply taken
        # as sqrt(z_mean + 1)
        self.b = np.sqrt(self.z_mean + 1)

        # Define the k values for the integration (from kmin to kmax), at which
        # the spectrum will be computed (and stored for the fiducial model)
        # k_size is deeply arbitrary here, TODO
        self.k_fid = np.logspace(log10(self.kmin),
                                 log10(self.kmax),
                                 num=self.k_size)

        ################
        # Noise spectrum TODO properly
        ################

        self.n_g = np.zeros(self.nbin, 'float64')

        # obsolete settings from 2012
        #self.n_g = np.array([6844.945, 7129.45,
        #                     7249.912, 7261.722,
        #                     7203.825, 7103.047,
        #                     6977.571, 6839.546,
        #                     6696.957, 5496.988,
        #                     4459.240, 3577.143,
        #                     2838.767, 2229.282,
        #                     1732.706, 1333.091])
        #self.n_g = self.n_g * self.efficiency * 41253.

        # euclid 2016 settings
        self.n_g = np.array([
            2434.280, 4364.812, 4728.559, 4825.798, 4728.797, 4507.625,
            4269.851, 3720.657, 3104.309, 2308.975, 1514.831, 1474.707,
            893.716, 497.613
        ])

        self.n_g = self.n_g * self.fsky * 41253  # 41253 deg^2 is the full-sky area

        # If the file exists, initialize the fiducial values, the spectrum will
        # be read first, with k_size values of k and nbin values of z. Then,
        # H_fid and D_A fid will be read (each with nbin values).
        self.fid_values_exist = False
        self.pk_nl_fid = np.zeros((self.k_size, 2 * self.nbin + 1), 'float64')
        self.H_fid = np.zeros(2 * self.nbin + 1, 'float64')
        self.D_A_fid = np.zeros(2 * self.nbin + 1, 'float64')
        self.sigma_r_fid = np.zeros(self.nbin, 'float64')

        fid_file_path = os.path.join(self.data_directory, self.fiducial_file)
        if os.path.exists(fid_file_path):
            self.fid_values_exist = True
            with open(fid_file_path, 'r') as fid_file:
                line = fid_file.readline()
                while line.find('#') != -1:
                    line = fid_file.readline()
                while (line.find('\n') != -1 and len(line) == 1):
                    line = fid_file.readline()
                for index_k in xrange(self.k_size):
                    for index_z in xrange(2 * self.nbin + 1):
                        self.pk_nl_fid[index_k, index_z] = float(line)
                        line = fid_file.readline()
                for index_z in xrange(2 * self.nbin + 1):
                    self.H_fid[index_z] = float(line.split()[0])
                    self.D_A_fid[index_z] = float(line.split()[1])
                    line = fid_file.readline()
                for index_z in xrange(self.nbin):
                    self.sigma_r_fid[index_z] = float(line)
                    line = fid_file.readline()
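        # (illustrative note, not from the original source) the reading loop
        # above assumes the fiducial file is laid out line by line as:
        #   k_size * (2 * nbin + 1) lines with one P_nl(k, z) value each,
        #   then 2 * nbin + 1 lines with "H(z)  D_A(z)" pairs,
        #   then nbin lines with one sigma_r(z) value each.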

        # Else the file will be created in the loglkl() function.
        return
Code example #46
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # Force the cosmological module to store Pk for redshifts up to
        # max(self.z) and for k up to k_max
        self.need_cosmo_arguments(data, {'output': 'mPk'})
        self.need_cosmo_arguments(data, {'z_max_pk': self.zmax})
        self.need_cosmo_arguments(data, {'P_k_max_h/Mpc': self.k_max_h_by_Mpc})

        # Compute non-linear power spectrum if requested
        if (self.use_halofit):
            self.need_cosmo_arguments(data, {'non linear':'halofit'})

        # Define array of l values, and initialize them
        # It is a logspace
        # find nlmax in order to reach lmax with logarithmic steps dlnl
        self.nlmax = int(np.log(self.lmax) / self.dlnl) + 1
        # redefine slightly dlnl so that the last point is always exactly lmax
        self.dlnl = np.log(self.lmax) / (self.nlmax - 1)
        self.l = np.exp(self.dlnl * np.arange(self.nlmax))

        # Read dn_dz from window files
        self.z_p = np.zeros(self.nzmax)
        zptemp = np.zeros(self.nzmax)
        self.p = np.zeros((self.nzmax, self.nbin))
        for i in xrange(self.nbin):
            window_file_path = os.path.join(
                self.data_directory, self.window_file[i])
            if os.path.exists(window_file_path):
                zptemp = np.loadtxt(window_file_path, usecols=[0])
                if (i > 0 and np.sum((zptemp-self.z_p)**2) > 1e-6):
                    raise io_mp.LikelihoodError(
                        "The redshift values for the window files "
                        "at different bins do not match")
                self.z_p = zptemp
                self.p[:, i] = np.loadtxt(window_file_path, usecols=[1])
            else:
                raise io_mp.LikelihoodError("File not found:\n %s"%window_file_path)

        # Read measurements of xi+ and xi-
        nt = self.nbin * (self.nbin + 1) // 2  # number of unique bin pairs
        self.theta_bins = np.zeros(2*self.ntheta)
        self.xi_obs = np.zeros(self.ntheta*nt*2)
        xipm_file_path = os.path.join(
            self.data_directory, self.xipm_file)
        if os.path.exists(xipm_file_path):
            self.theta_bins = np.loadtxt(xipm_file_path)[:, 0]
            if (np.sum(
                (self.theta_bins[:self.ntheta] -
                    self.theta_bins[self.ntheta:])**2) > 1e-6):
                raise io_mp.LikelihoodError(
                    "The angular values at which xi+ and xi- "
                    "are observed do not match")
            temp = np.loadtxt(xipm_file_path)[:, 1:]
        else:
            raise io_mp.LikelihoodError("File not found:\n %s"%xipm_file_path)

        k = 0
        for j in xrange(nt):
            for i in xrange(2*self.ntheta):
                self.xi_obs[k] = temp[i, j]
                k = k + 1

        # Read covariance matrix
        ndim = (self.ntheta)*(self.nbin)*(self.nbin+1)
        covmat = np.zeros((ndim, ndim))
        covmat_file_path = os.path.join(self.data_directory, self.covmat_file)
        if os.path.exists(covmat_file_path):
            covmat = np.loadtxt(covmat_file_path)
        else:
            raise io_mp.LikelihoodError("File not found:\n %s"%covmat_file_path)

        # divide the covariance by the Anderson-Hartlap factor (equivalent to
        # multiplying the inverse covariance by it)
        covmat = covmat / self.ah_factor

        # Read angular cut values (OPTIONAL)
        if (self.use_cut_theta):
            cut_values = np.zeros((self.nbin, 2))
            cutvalues_file_path = os.path.join(
                self.data_directory, self.cutvalues_file)
            if os.path.exists(cutvalues_file_path):
                cut_values = np.loadtxt(cutvalues_file_path)
            else:
                raise io_mp.LikelihoodError("File not found:\n %s"%cutvalues_file_path)

        # Normalize selection functions
        self.p_norm = np.zeros(self.nbin, 'float64')
        for Bin in xrange(self.nbin):
            self.p_norm[Bin] = np.sum(0.5*(
                self.p[1:, Bin]+self.p[:-1, Bin])*(
                self.z_p[1:]-self.z_p[:-1]))

        # Compute theta mask
        if (self.use_cut_theta):
            mask = np.zeros(2*nt*self.ntheta)
            iz = 0
            for izl in xrange(self.nbin):
                for izh in xrange(izl, self.nbin):
                    # this counts the bin combinations
                    # iz=1 =>(1,1), iz=2 =>(1,2) etc
                    iz = iz + 1
                    for i in xrange(self.ntheta):
                        j = (iz-1)*2*self.ntheta
                        xi_plus_cut = max(
                            cut_values[izl, 0], cut_values[izh, 0])
                        xi_minus_cut = max(
                            cut_values[izl, 1], cut_values[izh, 1])
                        if (self.theta_bins[i] > xi_plus_cut):
                            mask[j+i] = 1
                        if (self.theta_bins[i] > xi_minus_cut):
                            mask[self.ntheta + j+i] = 1
        else:
            mask = np.ones(2*nt*self.ntheta)

        self.num_mask = int(np.sum(mask))
        # indices of the unmasked entries of the data vector
        self.mask_indices = np.int32(np.where(mask == 1)[0])
        # Precompute the masked inverse covariance
        self.wl_invcov = np.linalg.inv(covmat[self.mask_indices][:, self.mask_indices])
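        # (illustrative sketch, not from the original source) loglkl() can then
        # form the chi^2 directly from the masked residuals, e.g.:
        #   delta = (self.xi - self.xi_obs)[self.mask_indices]
        #   chi2 = delta.dot(self.wl_invcov).dot(delta)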

        # Fill array of discrete z values
        # self.z = np.linspace(0, self.zmax, num=self.nzmax)

        ################
        # Noise spectrum
        ################

        # Number of galaxies per steradian
        self.noise = 3600.*self.gal_per_sqarcmn*(180./math.pi)**2

        # Number of galaxies per steradian per bin
        self.noise = self.noise/self.nbin

        # Noise spectrum (diagonal in bin*bin space, independent of l and Bin)
        self.noise = self.rms_shear**2/self.noise

        ################################################
        # discrete theta values (to convert C_l to xi's)
        ################################################

        thetamin = np.min(self.theta_bins)*0.8
        thetamax = np.max(self.theta_bins)*1.2

        self.nthetatot = np.ceil(math.log(thetamax/thetamin)/self.dlntheta) + 1
        self.nthetatot = np.int32(self.nthetatot)
        self.theta = np.zeros(self.nthetatot, 'float64')
        self.a2r = math.pi/(180.*60.)

        # define an array of theta's
        for it in xrange(self.nthetatot):
            self.theta[it] = thetamin*math.exp(self.dlntheta*it)

        ################################################################
        # discrete l values used in the integral to convert C_l to xi's
        ################################################################

        # l = x / theta / self.a2r
        # x = l * theta * self.a2r

        # We start by considering the largest theta, theta[-1], and for that value we infer
        # a list of l's from the requirement that the corresponding x values are spaced linearly with a given stepsize, until xmax.
        # Then we loop over smaller theta values, in decreasing order, and for each of them we complete the previous list of l's,
        # always requiring the same dx stepsize (so that dl does vary), up to xmax.
        #
        # We first apply this to a running value ll, in order to count the total number of ll's, called nl.
        # Then we create the array lll[nl] and fill it with the same values.
        #
        # we also compute on the fly the critical index il_max[it] such that ll[il_max[it]]*self.theta[it]*self.a2r
        # is the first value of x above xmax
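        # (illustrative note, not from the original source) since x = l * theta * a2r,
        # a fixed step dx in x corresponds to dl = dx / (theta * a2r): the smaller
        # the angle theta, the larger the resulting step in l.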

        ll = 1.
        il = 0
        while (ll * self.theta[-1] * self.a2r < self.dx_threshold):
            ll += self.dx_below_threshold / self.theta[-1] / self.a2r
            il += 1
        for it in xrange(self.nthetatot):
            while (ll * self.theta[self.nthetatot - 1 - it] * self.a2r < self.xmax) and (ll + self.dx_above_threshold / self.theta[self.nthetatot - 1 - it] / self.a2r < self.lmax):
                ll += self.dx_above_threshold / self.theta[self.nthetatot - 1 - it] / self.a2r
                il += 1
        self.nl = il + 1

        self.lll = np.zeros(self.nl, 'float64')
        self.il_max = np.zeros(self.nthetatot, 'int')
        il = 0
        self.lll[il] = 1.
        while (self.lll[il] * self.theta[-1] * self.a2r < self.dx_threshold):
            il += 1
            self.lll[il] = self.lll[il - 1] + self.dx_below_threshold / self.theta[-1] / self.a2r
        for it in xrange(self.nthetatot):
            while (self.lll[il] * self.theta[self.nthetatot - 1 - it] * self.a2r < self.xmax) and (self.lll[il] + self.dx_above_threshold / self.theta[self.nthetatot - 1 - it] / self.a2r < self.lmax):
                il += 1
                self.lll[il] = self.lll[il - 1] + self.dx_above_threshold / self.theta[self.nthetatot - 1 - it] / self.a2r
            self.il_max[self.nthetatot - 1 - it] = il

        # finally we compute the array l*dl that will be used in the trapezoidal integration
        # (l is a factor in the integrand [l * C_l * Bessel], and dl is like a weight)
        self.ldl = np.zeros(self.nl, 'float64')
        self.ldl[0] = self.lll[0] * 0.5 * (self.lll[1] - self.lll[0])
        for il in xrange(1, self.nl - 1):
            self.ldl[il] = self.lll[il] * 0.5 * (self.lll[il + 1] - self.lll[il - 1])
        self.ldl[-1] = self.lll[-1] * 0.5 * (self.lll[-1] - self.lll[-2])
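        # (illustrative note, not from the original source) these are standard
        # trapezoid weights: for samples f of a function on the grid lll,
        #   np.sum(self.ldl * f)
        # approximates the integral of l * f(l) over l.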

        #####################################################################
        # Allocation of various arrays filled and used in the function loglkl
        #####################################################################

        self.r = np.zeros(self.nzmax, 'float64')
        self.dzdr = np.zeros(self.nzmax, 'float64')
        self.g = np.zeros((self.nzmax, self.nbin), 'float64')
        self.pk = np.zeros((self.nlmax, self.nzmax), 'float64')
        self.k_sigma = np.zeros(self.nzmax, 'float64')
        self.alpha = np.zeros((self.nlmax, self.nzmax), 'float64')
        if 'epsilon' in self.use_nuisance:
            self.E_th_nu = np.zeros((self.nlmax, self.nzmax), 'float64')
        self.nbin_pairs = self.nbin * (self.nbin + 1) // 2
        self.Cl_integrand = np.zeros((self.nzmax, self.nbin_pairs), 'float64')
        self.Cl = np.zeros((self.nlmax, self.nbin_pairs), 'float64')
        if self.theoretical_error != 0:
            self.El_integrand = np.zeros((self.nzmax, self.nbin_pairs), 'float64')
            self.El = np.zeros((self.nlmax, self.nbin_pairs), 'float64')
        self.spline_Cl = np.empty(self.nbin_pairs, dtype=(list, 3))
        self.xi1 = np.zeros((self.nthetatot, self.nbin_pairs), 'float64')
        self.xi2 = np.zeros((self.nthetatot, self.nbin_pairs), 'float64')
        self.Cll = np.zeros((self.nbin_pairs, self.nl), 'float64')
        self.BBessel0 = np.zeros(self.nl, 'float64')
        self.BBessel4 = np.zeros(self.nl, 'float64')
        self.xi1_theta = np.empty(self.nbin_pairs, dtype=(list, 3))
        self.xi2_theta = np.empty(self.nbin_pairs, dtype=(list, 3))
        self.xi = np.zeros(np.size(self.xi_obs), 'float64')

        return