Example #1
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        if 'bao_boss_aniso_gauss_approx' in data.experiments:
            raise io_mp.LikelihoodError(
                'conflicting bao_boss_aniso_gauss_approx measurements')

        # self.z, .hfid, .dafid, and .rsfid are read from the data file

        # load the aniso likelihood
        filepath = os.path.join(self.data_directory, self.file)
        # alpha_perp = (D_A / rs) * (rs / D_A)_fid
        # alpha_para = (H rs)_fid / (H rs)
        prob_dtype = [('alpha_perp', np.float64), ('alpha_para', np.float64),
                      ('prob', np.float64)]
        prob = np.loadtxt(filepath,
                          delimiter=None,
                          comments='#',
                          skiprows=0,
                          dtype=prob_dtype)
        size = int(np.sqrt(len(prob)))
        x = prob['alpha_perp'].reshape(size, size)[:, 0]
        y = prob['alpha_para'].reshape(size, size)[0, :]
        Z = prob['prob'].reshape(size, size)
        normZ = np.max(Z)
        Z = Z / normZ
        # use the faster interp.RectBivariateSpline interpolation scheme
        self.prob_interp = interp.RectBivariateSpline(x, y, Z, kx=3, ky=3, s=0)
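A minimal sketch (not part of the original example) of how this interpolator could then be evaluated in loglkl, assuming a single redshift self.z and the fiducial values self.hfid, self.dafid and self.rsfid read from the data file as noted above (units of the fiducial Hubble rate are also an assumption):

    def loglkl(self, cosmo, data):

        # sketch only: rescaled BAO parameters for the current model
        da = cosmo.angular_distance(self.z)
        H = cosmo.Hubble(self.z)
        rs = cosmo.rs_drag()
        # alpha_perp = (D_A / rs) * (rs / D_A)_fid
        alpha_perp = (da / rs) * (self.rsfid / self.dafid)
        # alpha_para = (H rs)_fid / (H rs)
        alpha_para = (self.hfid * self.rsfid) / (H * rs)

        # evaluate the normalised probability on the interpolated grid
        prob = self.prob_interp(alpha_perp, alpha_para)[0, 0]

        # return ln(L), guarding against non-positive interpolated values
        return np.log(prob) if prob > 0. else -1e10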
Example #2
    def loglkl(self, cosmo, data):

        chi2 = 0.

        # for each point, compute angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # theoretical prediction and chi2 contribution
        for i in range(self.num_points):

            da = cosmo.angular_distance(self.z[i])
            rs = cosmo.rs_drag()

            if self.type[i] == 8:
                theo = 180 * rs / (da * np.pi * (1. + self.z[i]))

            else:
                raise io_mp.LikelihoodError("In likelihood %s. " % self.name +
                                            "BAO data type %s " %
                                            self.type[i] +
                                            "in %d-th line not understood" % i)

            chi2 += ((theo - self.data[i]) / self.error[i])**2

        # return ln(L)
        lkl = -0.5 * chi2

        return lkl
Example #3
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        conflicting_experiments = [
            'bao', 'bao_boss', 'bao_known_rs',
            'bao_boss_aniso', 'bao_boss_aniso_gauss_approx', 'bao_smallz_2014'
        ]
        for experiment in conflicting_experiments:
            if experiment in data.experiments:
                raise io_mp.LikelihoodError('conflicting BAO measurements')

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.data = np.array([], 'float64')
        self.error = np.array([], 'float64')
        self.type = np.array([], 'int')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.file), 'r') as filein:
            for line in filein:
                if line.strip() and line.find('#') == -1:
                    # the first entry of the line is the identifier
                    this_line = line.split()
                    # insert into array if this id is not manually excluded
                    if not this_line[0] in self.exclude:
                        self.z = np.append(self.z, float(this_line[1]))
                        self.data = np.append(self.data, float(this_line[2]))
                        self.error = np.append(self.error, float(this_line[3]))
                        self.type = np.append(self.type, int(this_line[4]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
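Based on this parser, each non-comment line of the data file is expected to hold an identifier followed by redshift, measurement, error, and type code. An illustrative layout (placeholder names and values, not real data):

    # id        z      value   error   type
    survey_A    0.10   0.30    0.02    7
    survey_B    0.35   8.90    0.20    3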
Example #4
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # needed arguments in order to get sigma_8(z) up to z=2 with correct precision
        self.need_cosmo_arguments(data, {'output': 'mPk'})
        self.need_cosmo_arguments(data, {'P_k_max_h/Mpc': '1.'})
        self.need_cosmo_arguments(data, {'z_max_pk': '2.'})

        # are there conflicting experiments?
        if 'bao_fs_boss_dr12' in data.experiments:
            raise io_mp.LikelihoodError('conflicting bao measurements')

        # define arrays for values of z and data points
        self.z = np.array([], 'float64')
        self.fsig8 = np.array([], 'float64')
        self.sfsig8 = np.array([], 'float64')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.data_file),
                  'r') as filein:
            for i, line in enumerate(filein):
                if line.strip() and line.find('#') == -1:
                    this_line = line.split()
                    self.z = np.append(self.z, float(this_line[0]))
                    self.fsig8 = np.append(self.fsig8, float(this_line[1]))
                    self.sfsig8 = np.append(self.sfsig8, float(this_line[2]))

        # positions (1-based indices) of the WiggleZ and SDSS points in the data vector
        self.Wigglez = [13, 14, 15]
        self.SDSS = [19, 20, 21, 22]
        # AP effect corrections
        self.HdAz = [
            5905.2, 5905.2, 5902.17, 27919.8, 40636.6, 45463.8, 47665.2,
            90926.2, 63409.3, 88415.1, 78751., 132588., 102712., 132420.,
            155232., 134060., 179999., 263053., 200977., 242503., 289340.,
            352504.
        ]
        # read covariance matrices
        self.CijWig = np.loadtxt(os.path.join(self.data_directory,
                                              self.cov_Wig_file),
                                 unpack=True)
        self.CijSDSS = np.loadtxt(os.path.join(self.data_directory,
                                               self.cov_SDSS_file),
                                  unpack=True)
        self.Cijfs8 = np.diagflat(np.power(self.sfsig8, 2))
        self.Cijfs8[(self.Wigglez[0] - 1):self.Wigglez[-1],
                    (self.Wigglez[0] - 1):self.Wigglez[-1]] = self.CijWig
        self.Cijfs8[(self.SDSS[0] - 1):self.SDSS[-1],
                    (self.SDSS[0] - 1):self.SDSS[-1]] = self.CijSDSS

        # number of bins
        self.num_points = np.shape(self.z)[0]

        # Scale-dependent growth; parameters: wavenumber k0, redshift step dz,
        # and step size hstep for the numerical derivative. The derivative
        # settings are delicate; adjust at your own peril.
        self.k0 = 0.1
        self.dz = 0.01
        self.hstep = 0.001
        # Array of redshifts z in [0, 2) with step 0.01, used for the interpolation
        self.zed = np.arange(0.0, 2.0, self.dz)
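A minimal sketch (an assumption, not taken from the original listing) of how f*sigma_8(z) could be estimated with the step size self.hstep, using the central difference f*sigma_8 = -(1+z) d sigma_8/dz and the classy method cosmo.sigma(R, z):

    def fsigma8_of_z(self, cosmo, z):
        # sketch: sigma_8 is sigma(R = 8 Mpc/h, z); the growth rate follows
        # from a central finite difference in redshift
        R8 = 8. / cosmo.h()
        sig_plus = cosmo.sigma(R8, z + self.hstep)
        sig_minus = cosmo.sigma(R8, z - self.hstep)
        return -(1. + z) * (sig_plus - sig_minus) / (2. * self.hstep)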
Example #5
    def __init__(self, path, data, command_line):

        # Unusual construction, since the data files are not distributed
        # alongside JLA (size problems)
        try:
            Likelihood_sn.__init__(self, path, data, command_line)
        except IOError:
            raise io_mp.LikelihoodError(
                "The JLA data files were not found. Please download the "
                "following link "
                "http://supernovae.in2p3.fr/sdss_snls_jla/jla_likelihood_v4.tgz"
                ", extract it, and copy all files present in "
                "`jla_likelihood_v4/data` to `your_montepython/data/JLA`")

        # Load matrices from text files, whose names were read in the
        # configuration file
        self.C00 = self.read_matrix(self.mag_covmat_file)
        self.C11 = self.read_matrix(self.stretch_covmat_file)
        self.C22 = self.read_matrix(self.colour_covmat_file)
        self.C01 = self.read_matrix(self.mag_stretch_covmat_file)
        self.C02 = self.read_matrix(self.mag_colour_covmat_file)
        self.C12 = self.read_matrix(self.stretch_colour_covmat_file)

        # Reading light-curve parameters from self.data_file (jla_lcparams.txt)
        self.light_curve_params = self.read_light_curve_parameters()
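For context, the six covariance blocks read above are usually combined at each point in parameter space with the light-curve nuisance parameters alpha and beta; a hedged sketch of that standard JLA-style combination (the method name is an illustration only):

    def assemble_covariance(self, alpha, beta):
        # sketch: C = C00 + a^2 C11 + b^2 C22 + 2a C01 - 2b C02 - 2ab C12
        return (self.C00
                + alpha ** 2 * self.C11
                + beta ** 2 * self.C22
                + 2. * alpha * self.C01
                - 2. * beta * self.C02
                - 2. * alpha * beta * self.C12)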
Example #6
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        if 'bao_boss_aniso' in data.experiments:
            raise io_mp.LikelihoodError(
                'conflicting bao_boss_aniso measurements')

        # define array for values of z and data points
        self.z = np.array([], 'float64')
        self.DA_rdfid_by_rd_in_Mpc = np.array([], 'float64')
        self.DA_error = np.array([], 'float64')
        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.array([], 'float64')
        self.H_error = np.array([], 'float64')
        self.cross_corr = np.array([], 'float64')
        self.rd_fid_in_Mpc = np.array([], 'float64')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.file), 'r') as filein:
            for i, line in enumerate(filein):
                if line.find('#') == -1:
                    this_line = line.split()
                    # this_line[0] is some identifier
                    self.z = np.append(self.z, float(this_line[1]))
                    self.DA_rdfid_by_rd_in_Mpc = np.append(
                        self.DA_rdfid_by_rd_in_Mpc, float(this_line[2]))
                    self.DA_error = np.append(
                        self.DA_error, float(this_line[3]))
                    self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.append(
                        self.H_rd_by_rdfid_in_km_per_s_per_Mpc, float(this_line[4]))
                    self.H_error = np.append(
                        self.H_error, float(this_line[5]))
                    self.cross_corr = np.append(
                        self.cross_corr, float(this_line[6]))
                    self.rd_fid_in_Mpc = np.append(
                        self.rd_fid_in_Mpc, float(this_line[7]))

                    # is the cross correlation coefficient valid?
                    if self.cross_corr[-1] < -1.0 or self.cross_corr[-1] > 1.0:
                        raise io_mp.LikelihoodError(
                            "invalid cross correlation coefficient in entry "
                            "%d: %f" % (i, self.cross_corr[-1]))

        # number of data points
        self.num_points = np.shape(self.z)[0]
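A minimal sketch (an assumption about the likelihood form, not part of the original example) of how these anisotropic measurements could enter loglkl through a correlated two-dimensional Gaussian approximation:

    def loglkl(self, cosmo, data):

        chi2 = 0.
        for i in range(self.num_points):
            # sketch: theoretical D_A * rd_fid / rd and H * rd / rd_fid
            da = cosmo.angular_distance(self.z[i])
            H = cosmo.Hubble(self.z[i]) * const.c / 1000.  # km/s/Mpc
            rd = cosmo.rs_drag()
            delta_da = (da * self.rd_fid_in_Mpc[i] / rd
                        - self.DA_rdfid_by_rd_in_Mpc[i])
            delta_H = (H * rd / self.rd_fid_in_Mpc[i]
                       - self.H_rd_by_rdfid_in_km_per_s_per_Mpc[i])
            r = self.cross_corr[i]
            # correlated 2D Gaussian chi2 contribution
            chi2 += ((delta_da / self.DA_error[i]) ** 2
                     + (delta_H / self.H_error[i]) ** 2
                     - 2. * r * delta_da * delta_H
                     / (self.DA_error[i] * self.H_error[i])) / (1. - r ** 2)

        return -0.5 * chi2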
Example #7
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # needed arguments in order to get sigma_8(z) up to z=1 with correct precision
        if 'K1K_CorrelationFunctions_2cosmos_geo_vs_growth' in data.experiments:
            print('Conflict! - using KiDS P_k')
        else:
            self.need_cosmo1_arguments(data, {'output': 'mPk'})
            self.need_cosmo1_arguments(data, {'P_k_max_h/Mpc': self.k_max})
            self.need_cosmo2_arguments(data, {'output': 'mPk'})
            self.need_cosmo2_arguments(data, {'P_k_max_h/Mpc': self.k_max})
            self.need_cosmo1_arguments(data, {'z_max_pk': self.k_max})
            self.need_cosmo2_arguments(data, {'z_max_pk': self.k_max})

        # are there conflicting experiments?
        if 'bao_boss_aniso' in data.experiments:
            raise io_mp.LikelihoodError(
                'conflicting bao_boss_aniso measurements')

        # define arrays for values of z and data points
        self.z = np.array([], 'float64')
        self.DM_rdfid_by_rd_in_Mpc = np.array([], 'float64')
        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.array([], 'float64')
        self.fsig8 = np.array([], 'float64')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.data_file),
                  'r') as filein:
            for i, line in enumerate(filein):
                if line.strip() and line.find('#') == -1:
                    this_line = line.split()
                    # load redshifts and D_M * (r_s / r_s_fid)^-1 in Mpc
                    if this_line[1] == 'dM(rsfid/rs)':
                        self.z = np.append(self.z, float(this_line[0]))
                        self.DM_rdfid_by_rd_in_Mpc = np.append(
                            self.DM_rdfid_by_rd_in_Mpc, float(this_line[2]))
                    # load H(z) * (r_s / r_s_fid) in km s^-1 Mpc^-1
                    elif this_line[1] == 'Hz(rs/rsfid)':
                        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.append(
                            self.H_rd_by_rdfid_in_km_per_s_per_Mpc,
                            float(this_line[2]))
                    # load f * sigma8
                    elif this_line[1] == 'fsig8':
                        self.fsig8 = np.append(self.fsig8, float(this_line[2]))

        # read covariance matrix
        self.covmat = np.loadtxt(
            os.path.join(self.data_directory, self.cov_file))
        self.cholesky_transform = cholesky(self.covmat, lower=True)

        # number of bins
        self.num_bins = np.shape(self.z)[0]

        # number of data points
        self.num_points = np.shape(self.covmat)[0]
    def loglkl(self, cosmo1, cosmo2, data):
        # ZP: Since the position of the acoustic peak is a strongly
        # geometric phenomenon, the calculations are done using cosmo2,
        # as it is the one responsible for parametrizing the geometry.

        # However, both cosmologies are called, for consistency with the
        # sampler.

        # for each point, compute angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # theoretical prediction and chi2 contribution
        prediction = np.array([], dtype='float64')
        for counter, item in enumerate(self.data):

            if self.data_type[counter] == 1:
                if 'ln10^{10}A_s_1' in data.mcmc_parameters:
                    theo = data.mcmc_parameters['ln10^{10}A_s_1']['current']
                else:
                    theo = cosmo1.get_current_derived_parameters(
                        ['ln10^{10}A_s'])['ln10^{10}A_s']

            elif self.data_type[counter] == 2:
                theo = data.mcmc_parameters['n_s_1']['current']

            elif self.data_type[counter] == 3:
                theo = cosmo2.theta_star_100()

            else:
                raise io_mp.LikelihoodError(
                    "In likelihood %s. " % self.name +
                    "BAO data type %s " % self.type[counter] +
                    "in %d-th line not understood" % counter)

            prediction = np.append(prediction, float(theo))

        # print(prediction)
        vec = self.data - prediction
        if np.isinf(vec).any() or np.isnan(vec).any():
            chi2 = 2e12
        else:
            # don't invert that matrix...
            # use the Cholesky decomposition instead:
            yt = solve_triangular(self.cholesky_transform, vec, lower=True)
            chi2 = yt.dot(yt)
        # return ln(L)
        lkl = -chi2 / 2.

        return lkl
Example #9
    def loglkl(self, cosmo1, cosmo2, data):
        # ZP: Since the position of the acoustic peak is a strongly
        # geometric phenomenon, the calculations are done using cosmo2,
        # as it is the one responsible for parametrizing the geometry.

        # However, both cosmologies are called, for consistency with the
        # sampler.

        chi2 = 0.

        # for each point, compute angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # theoretical prediction and chi2 contribution
        for i in range(self.num_points):

            da = cosmo2.angular_distance(self.z[i])
            dr = self.z[i] / cosmo2.Hubble(self.z[i])
            dv = pow(da * da * (1 + self.z[i]) * (1 + self.z[i]) * dr, 1. / 3.)
            rs = cosmo2.rs_drag()

            if self.type[i] == 3:
                theo = dv / rs

            elif self.type[i] == 4:
                theo = dv

            elif self.type[i] == 5:
                theo = da / rs

            elif self.type[i] == 6:
                theo = 1. / cosmo2.Hubble(self.z[i]) / rs

            elif self.type[i] == 7:
                theo = rs / dv
            else:
                raise io_mp.LikelihoodError(
                    "In likelihood %s. " % self.name +
                    "BAO data type %s " % self.type[i] +
                    "in %d-th line not understood" % i)

            chi2 += ((theo - self.data[i]) / self.error[i]) ** 2

        # return ln(L)
        lkl = - 0.5 * chi2

        return lkl
Example #10
    def __init__(self, path, data, command_line):

        # Unusual construction, since the data files are not distributed
        # alongside Pantheon (size problems)
        try:
            Likelihood_sn.__init__(self, path, data, command_line)
        except IOError:
            raise io_mp.LikelihoodError(
                "The Pantheon data files were not found. Please check if "
                "the following files are in the data/Pantheon directory: "
                "\n-> pantheon.dataset"
                "\n-> lcparam_full_long.txt"
                "\n-> sys_full_long.dat")

        # Load matrices from text files, whose names were read in the
        # configuration file
        self.C00 = self.read_matrix(self.mag_covmat_file)

        # Reading light-curve parameters from self.data_file (lcparam_full_long.txt)
        self.light_curve_params = self.read_light_curve_parameters()

        # Reordering by J. Renk. The following steps can be computed in the
        # initialisation step as they do not depend on the point in parameter-space
        #   -> likelihood evaluation is 30% faster

        # Compute the covariance matrix
        # The module numexpr is used to perform the long array multiplications
        # quickly (a factor of 3 improvement over numpy). It is used as a
        # replacement for the blas routines cblas_dcopy and cblas_daxpy.
        # For numexpr to work, we need (this seems like a bug, but anyway) to
        # create local variables holding the arrays. This costs no time (it is
        # a simple pointer assignment)
        C00 = self.C00
        covm = ne.evaluate("C00")

        sn = self.light_curve_params

        # Update the diagonal terms of the covariance matrix with the
        # statistical error
        covm += np.diag(sn.dmb**2)

        # Whiten the residuals, in two steps.
        # Step 1) Compute the Cholesky decomposition of the covariance matrix,
        # in place. This is a time-expensive part (0.015 seconds), which is why
        # it is now done in init. Note that this is different from JLA, where
        # it needed to be done inside the loglkl function.
        self.cov = la.cholesky(covm, lower=True, overwrite_a=True)
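Step 2 of the whitening then typically happens in loglkl; a hedged sketch, assuming light-curve fields sn.zcmb and sn.mb and an absolute-magnitude nuisance parameter 'M' (all of these names are assumptions here):

    def loglkl(self, cosmo, data):

        # sketch: distance-modulus residuals, whitened with the Cholesky factor
        sn = self.light_curve_params
        M = (data.mcmc_parameters['M']['current']
             * data.mcmc_parameters['M']['scale'])
        # distance modulus: 5 log10(d_L / 10 pc) with d_L in Mpc
        moduli = np.array([5. * np.log10(cosmo.luminosity_distance(z)) + 25.
                           for z in sn.zcmb])
        residuals = sn.mb - (M + moduli)
        whitened = solve_triangular(self.cov, residuals, lower=True)
        chi2 = whitened.dot(whitened)
        return -0.5 * chi2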
Example #11
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        # are there conflicting experiments?
        conflicting_experiments = [
            'bao', 'bao_boss', 'bao_known_rs',
            'bao_boss_aniso', 'bao_boss_aniso_gauss_approx'
        ]
        for experiment in conflicting_experiments:
            if experiment in data.experiments:
                raise io_mp.LikelihoodError('conflicting BAO measurements')

        # define arrays for values of z and data points
        self.z = np.array([], 'float64')
        self.DM_rdfid_by_rd_in_Mpc = np.array([], 'float64')
        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.array([], 'float64')

        # read redshifts and data points
        with open(os.path.join(self.data_directory, self.data_file),
                  'r') as filein:
            for i, line in enumerate(filein):
                if line.strip() and line.find('#') == -1:
                    this_line = line.split()
                    # load redshifts and D_M * (r_s / r_s_fid)^-1 in Mpc
                    if this_line[1] == 'dM(rsfid/rs)':
                        self.z = np.append(self.z, float(this_line[0]))
                        self.DM_rdfid_by_rd_in_Mpc = np.append(
                            self.DM_rdfid_by_rd_in_Mpc, float(this_line[2]))
                    # load H(z) * (r_s / r_s_fid) in km s^-1 Mpc^-1
                    elif this_line[1] == 'Hz(rs/rsfid)':
                        self.H_rd_by_rdfid_in_km_per_s_per_Mpc = np.append(
                            self.H_rd_by_rdfid_in_km_per_s_per_Mpc,
                            float(this_line[2]))

        # read covariance matrix
        self.cov_data = np.loadtxt(
            os.path.join(self.data_directory, self.cov_file))

        # number of bins
        self.num_bins = np.shape(self.z)[0]

        # number of data points
        self.num_points = np.shape(self.cov_data)[0]
Example #12
    def __init__(self, path, data, command_line):

        # Unusual construction, since the data files are not distributed
        # alongside Pantheon (size problems)
        try:
            Likelihood_sn.__init__(self, path, data, command_line)
        except IOError:
            raise io_mp.LikelihoodError(
                "The Pantheon data files were not found. Please check if "
                "the following files are in the data/Pantheon directory: "
                "\n-> pantheon.dataset"
                "\n-> lcparam_full_long.txt"
                "\n-> sys_full_long.dat")

        # Load matrices from text files, whose names were read in the
        # configuration file
        self.C00 = self.read_matrix(self.mag_covmat_file)

        # Reading light-curve parameters from self.data_file (lcparam_full_long.txt)
        self.light_curve_params = self.read_light_curve_parameters()
Example #13
    def loglkl(self, cosmo, data):

        chi2 = 0.

        # for each point, compute angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # theoretical prediction and chi2 contribution
        for i in range(self.num_points):

            da = cosmo.angular_distance(self.z[i])
            dr = self.z[i] / cosmo.Hubble(self.z[i])
            dv = pow(da * da * (1 + self.z[i]) * (1 + self.z[i]) * dr, 1. / 3.)
            rs = cosmo.rs_drag()

            if self.type[i] == 3:
                theo = dv / rs

            elif self.type[i] == 4:
                theo = dv

            elif self.type[i] == 5:
                theo = da / rs

            elif self.type[i] == 6:
                theo = 1. / cosmo.Hubble(self.z[i]) / rs

            elif self.type[i] == 7:
                theo = rs / dv
            else:
                raise io_mp.LikelihoodError("In likelihood %s. " % self.name +
                                            "BAO data type %s " %
                                            self.type[i] +
                                            "in %d-th line not understood" % i)

            chi2 += ((theo - self.data[i]) / self.error[i])**2

        # return ln(L)
        lkl = -0.5 * chi2

        return lkl
Example #14
    def loglkl(self, cosmo, data):

        chi2 = 0.

        # for each point, compute angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # theoretical prediction and chi2 contribution
        # classes: (D_V/rs=3, Dv/Mpc=4, DA/rs=5, c/Hrs=6, rs/D_v=7, D_M/rs=8, H rs/rs_fid=9, D_M rs_fid/rs=10)
        for i in range(self.num_points):

            da = cosmo.angular_distance(self.z[i])
            dr = self.z[i] / cosmo.Hubble(self.z[i])
            H = cosmo.Hubble(self.z[i]) * const.c / 1000.

            dv = pow(da * da * (1 + self.z[i]) * (1 + self.z[i]) * dr, 1. / 3.)
            dm = da * (1 + self.z[i])

            rd = cosmo.rs_drag() * self.rd_rescale

            if (self.types[i] == set([5, 6])):
                transverse = da / rd
                parallel = (const.c / 1000.) / (H * rd)
                chi2 += self.chi2_interpolators.get_Dchi2_from_distances(
                    transverse, parallel, corr_type=self.corr_types[i])
            elif (self.types[i] == set([8, 6])):
                transverse = dm / rd
                parallel = (const.c / 1000.) / (H * rd)
                chi2 += self.chi2_interpolators.get_Dchi2_from_distances(
                    transverse, parallel, corr_type=self.corr_types[i])
            else:
                raise io_mp.LikelihoodError(
                    "In likelihood %s. " % self.name +
                    "BAO data types %s " % self.types[i] +
                    "in %d-th line not appropriately chosen." % i)

        # return ln(L)
        lkl = -0.5 * chi2

        return lkl
Example #15
    def __init__(self, path, data, command_line):

        # This reads the configuration file as well
        try:
            Likelihood_sn.__init__(self, path, data, command_line)
        except IOError:
            raise io_mp.LikelihoodError(
                "The JLA data files were not found. Please download the "
                "following link "
                "http://supernovae.in2p3.fr/sdss_snls_jla/jla_likelihood_v4.tgz"
                ", extract it, and copy all files present in "
                "`jla_likelihood_v4/data` to `your_montepython/data/JLA`")

        # read the only matrix
        self.C00 = self.read_matrix(self.mu_covmat_file)

        # Read the simplified light-curve self.data_file
        self.light_curve_params = self.read_light_curve_parameters()

        # The Cholesky decomposition of the covariance matrix can be computed
        # once and for all (it replaces an explicit inversion)
        self.C00 = la.cholesky(self.C00, lower=True, overwrite_a=True)
Example #16
    def loglkl(self, cosmo, data):

        YHe = cosmo.get_current_derived_parameters(['YHe'])['YHe']
        # TODO: change classy.pyx for more direct access to YHe
        # eta10 from eq.(9) in https://arxiv.org/pdf/0809.0631.pdf, assuming a standard value of Newton's constant
        eta10 = 273.45 * cosmo.omega_b() / (1. - 0.007 * YHe) * pow(
            2.725 / cosmo.T_cmb(), 3)
        dNeff = cosmo.Neff() - self.Neff0

        #TODO :: ideally, we would want to get N_eff from a call to class at BBN time, not like this

        if (eta10 < self.eta10_bounds[0]):
            raise io_mp.LikelihoodError(
                "The value of eta10 = %e was below the BBN table. Aborting." %
                eta10)
        if (eta10 > self.eta10_bounds[1]):
            raise io_mp.LikelihoodError(
                "The value of eta10 = %e was above the BBN table. Aborting." %
                eta10)

        if (dNeff < self.dNeff_bounds[0]):
            raise io_mp.LikelihoodError(
                "The value of delta Neff = %e was below the BBN table. Aborting."
                % dNeff)
        if (dNeff > self.dNeff_bounds[1]):
            raise io_mp.LikelihoodError(
                "The value of delta Neff = %e was above the BBN table. Aborting."
                % dNeff)

        yp = self.get_Yp(eta10, dNeff)
        dh = self.get_DH(eta10, dNeff)
        yperr = self.get_Yperr(eta10, dNeff)
        dherr = self.get_DHerr(eta10, dNeff)

        try:
            if len(yp) > 0:
                yp = yp[0]
            if len(dh) > 0:
                dh = dh[0]
            if len(yperr) > 0:
                yperr = yperr[0]
            if len(dherr) > 0:
                dherr = dherr[0]
        except:
            pass
        #print("From (eta10,N_eff): Theoretical : Y_p = {:.5g} \pm {:.5g} , D_H = {:.5g} \pm {:.5g}".format(yp,yperr,dh,dherr))

        chi_square = 0.
        #Deal with deuterium
        if "dh" in self.include_bbn_type:
            chisquare_dh = (dh - self.dh_cooke_mean)**2 / (
                self.dh_cooke_one_sig**2 + dherr**2)
            chi_square += chisquare_dh
            #print("Chi square DH = ",chisquare_dh)

        #Deal with helium
        if "yp" in self.include_bbn_type:
            try:
                if 'cooke' in self.yp_measurement_type:
                    if (yp > self.yp_cooke_mean):
                        chisquare_yp = (yp - self.yp_cooke_mean)**2 / (
                            self.yp_cooke_one_sig_p**2 + yperr**2)
                    else:
                        chisquare_yp = (yp - self.yp_cooke_mean)**2 / (
                            self.yp_cooke_one_sig_m**2 + yperr**2)
                elif ('aver' in self.yp_measurement_type):
                    chisquare_yp = (yp - self.yp_means['aver2015'])**2 / (
                        self.yp_sigs['aver2015']**2 + yperr**2)
                elif ('peimbert' in self.yp_measurement_type):
                    chisquare_yp = (yp - self.yp_means['peimbert2016'])**2 / (
                        self.yp_sigs['peimbert2016']**2 + yperr**2)
                elif ('izotov' in self.yp_measurement_type):
                    chisquare_yp = (yp - self.yp_means['izotov2014'])**2 / (
                        self.yp_sigs['izotov2014']**2 + yperr**2)
                else:
                    raise io_mp.LikelihoodError(
                        "Unrecognized experimental value of yp")
            except KeyError:
                raise io_mp.LikelihoodError(
                    "Unrecognized experimental value of yp")
            chi_square += chisquare_yp
            #print("Chi square YP = ",chisquare_yp)

        return -0.5 * chi_square
Example #17
    def loglkl(self, cosmo, data):

        # Write fiducial model spectra if needed (return an imaginary number in
        # that case)
        if self.fid_values_exist is False:

            # open file where fiducial model will be written and write header
            fid_file = open(
                os.path.join(self.data_directory, self.fiducial_file), 'w')
            fid_file.write('# Fiducial parameters')
            for key, value in io_mp.dictitems(data.mcmc_parameters):
                fid_file.write(', %s = %.5g' %
                               (key, value['current'] * value['scale']))
            fid_file.write('\n')

            # open sensitivity file and read relative errors
            if os.path.exists(
                    os.path.join(self.data_directory, self.sensitivity)):

                sensitivity = np.loadtxt(
                    os.path.join(self.data_directory, self.sensitivity))
                self.num_points = np.shape(sensitivity)[0]

                self.relative_error = np.array([], 'float64')

                for i in range(self.num_points):
                    self.z = np.append(self.z, sensitivity[i, 0])
                    self.type = np.append(self.type, self.error_type)
                    self.relative_error = np.append(
                        self.relative_error,
                        0.01 * sensitivity[i, self.error_column])
            else:
                raise io_mp.LikelihoodError("Could not find file ",
                                            self.sensitivity)

        chi2 = 0.

        # for each point, compute angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # theoretical prediction and chi2 contribution
        for i in range(self.num_points):

            da = cosmo.angular_distance(self.z[i])
            dr = self.z[i] / cosmo.Hubble(self.z[i])
            dv = pow(da * da * (1 + self.z[i]) * (1 + self.z[i]) * dr, 1. / 3.)
            rs = cosmo.rs_drag()

            if self.type[i] == 3:
                theo = dv / rs

            elif self.type[i] == 4:
                theo = dv

            elif self.type[i] == 5:
                theo = da / rs

            elif self.type[i] == 6:
                theo = 1. / cosmo.Hubble(self.z[i]) / rs

            elif self.type[i] == 7:
                theo = rs / dv
            else:
                raise io_mp.LikelihoodError("In likelihood %s. " % self.name +
                                            "BAO data type %s " %
                                            self.type[i] +
                                            "in %d-th line not understood" % i)

            if self.fid_values_exist is True:
                chi2 += ((theo - self.data[i]) / self.error[i])**2
            else:
                sigma = theo * self.relative_error[i]
                fid_file.write(self.nickname)
                fid_file.write("   %.8g  %.8g  %.8g %5d \n" %
                               (self.z[i], theo, sigma, self.type[i]))

        # Exit after writing fiducial file
        # (return an imaginary number to let the sampler know that fiducial models were just created)
        if self.fid_values_exist is False:
            print('\n\n')
            warnings.warn(
                "Writing fiducial model in %s, for %s likelihood" %
                (self.data_directory + '/' + self.fiducial_file, self.name))
            return 1j

        # return ln(L)
        lkl = -0.5 * chi2

        return lkl
Example #18
    def __init__(self, path, data, command_line):
        # Unusual construction, since the data files are not distributed
        # alongside CosmoLSS (size problems)
        try:
            # Read the .dataset file specifying the data.
            super(CosmoLSS, self).__init__(path, data, command_line)
        except IOError:
            raise io_mp.LikelihoodError(
                "The CosmoLSS data files were not found. Please download the "
                "following link "
                "httpsmomc.tgz"
                ", extract it, and copy the BK14 folder inside"
                "`BK14_cosmomc/data/` to `your_montepython/data/`")

        arguments = {
            'output': 'mPk',
            'non linear': 'HALOFIT',
            'z_pk': '0.0, 100.0',
            'P_k_max_h/Mpc': 100.0
        }
        self.need_cosmo_arguments(data, arguments)

        if (self.set_scenario == 3) and not self.use_conservative:
            self.size_covallmask = 210  #!postmask, fiducial
        else:
            self.size_covallmask = 186  #!postmask, conservative

        if (self.set_scenario == 1):  #!fiducial KiDS masking
            sizcovishpremask = 180  #!pre-masking
            sizcovish = self.size_covmask  #!post-masking
        elif ((self.set_scenario == 3) and not self.use_conservative):
            sizcovishpremask = 340  #!pre-masking
            sizcovish = self.size_covallmask  #!post-masking
        elif ((self.set_scenario == 3) and self.use_conservative):
            sizcovishpremask = 332  #!pre-masking
            sizcovish = self.size_covallmask  #!post-masking
        #What if scenario is not 1 or 3?

        sizcovishsq = sizcovish**2
        self.sizcov = sizcovish
        self.sizcovpremask = sizcovishpremask

        # !!!!!Lens redshifts!!!!!!!
        self.lens_redshifts = {}
        for samplename in ['cmass', 'lowz', '2dfloz', '2dfhiz']:
            self.lens_redshifts[samplename] = np.loadtxt(
                self.data_directory + '/lensingrsdfiles/nz_' + samplename +
                '_modelsj.dat')
            self.lens_redshifts[samplename][:, 1] /= (
                np.sum(self.lens_redshifts[samplename][:, 1]) * 0.01)

        # !!!Reading in source distributions
        self.arraysjfull = []
        for nz in range(4):
            fname = self.data_directory + '/lensingrsdfiles/hendriknz/nz_z' + str(
                nz + 1) + '_kids_boot' + str(0) + '.dat'  #!bootstrap DIR
            tmp = np.loadtxt(fname)
            tmp[:, 1] /= (np.sum(tmp[:, 1]) * 0.05)
            self.arraysjfull.append(tmp)

        #!!!Reading in measurements and masks and covariances
        if (self.set_scenario == 1):
            xipmtemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipmcut_kids_regcomb_blind2_swinburnesj.dat',
                usecols=(1, ))
            masktemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipm_kids4tom_selectsj.dat',
                usecols=(1, ))
            covxipmtemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipmcutcov_kids_regcomb_blind2_swinburnesj.dat',
                usecols=(2, ))
        elif (self.set_scenario == 3 and not self.use_conservative):
            xipmtemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipmgtpllarge4_kids_regcomb_blind2sj.dat',
                usecols=(1, ))
            masktemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipmgtpllarge4_kids4tom_selectsj.dat',
                usecols=(1, ))
            covxipmtemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipmgtpllarge4cov_kids_regcomb_blind2sj.dat',
                usecols=(2, ))
        elif (self.set_scenario == 3 and self.use_conservative):
            xipmtemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipmgtpllarge7_kids_regcomb_blind2sj.dat',
                usecols=(1, ))
            masktemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipmgtpllarge7_kids4tom_selectsj.dat',
                usecols=(1, ))
            covxipmtemp = np.loadtxt(
                self.data_directory +
                '/lensingrsdfiles/xipmgtpllarge7cov_kids_regcomb_blind2sj.dat',
                usecols=(2, ))
        #!Else missing
        self.xipm = xipmtemp
        #print masktemp.shape
        self.maskelements = masktemp
        #print covxipmtemp.shape, sizcovish
        self.covxipm = covxipmtemp.reshape(sizcovish, sizcovish)
        # Invert covariance matrix
        self.invcovxipm = la.inv(self.covxipm)

        #!converted from arcmin to degrees
        self.thetacfhtini = np.array([
            0.71336, 1.45210, 2.95582, 6.01675, 12.24745, 24.93039, 50.74726,
            103.29898, 210.27107
        ]) / 60.0  #!athena angular scales (deg) --- 9 ang bins
        self.thetaradcfhtini = self.thetacfhtini * np.pi / 180.0  #!converted to rad
        self.ellgentestarrini = np.array(range(2, 59001), dtype='float64')

        #!compute Bessel functions
        # We want a column-ordered, Fortran-contiguous memory block of the
        # form (iii, jjj), so it is most convenient to form the argument first.
        # We compute the outer product and flatten the matrix
        besarg = np.outer(self.ellgentestarrini,
                          self.thetaradcfhtini).flatten(order='C')
        self.bes0arr = scipy.special.jn(0, besarg)
        self.bes2arr = scipy.special.jn(2, besarg)
        self.bes4arr = scipy.special.jn(4, besarg)

        # Initialise multipole overlaps
        # Default settings for MultipoleOverlap Class, can be overwritten at initialisation:
        # k_min_theory=0.0, dk_theory=0.05, z_eff=0.57, size_convolution=30, k_num_conv=10,k_spacing_obs=0.05,k_min_obs=0.075,k_0175 = False, fit_k_0125 = False,fit_k_0075 = False
        CosmoLSS.mp_overlaps = {
            'cmass': self.MultipoleOverlap(k_fit=self.kfit_cmass),
            'lowz': self.MultipoleOverlap(z_eff=0.32, k_fit=self.kfit_lowz),
            '2dfloz': self.MultipoleOverlap(z_eff=0.31,
                                            k_fit=self.kfit_2dfloz),
            '2dfhiz': self.MultipoleOverlap(z_eff=0.56, k_fit=self.kfit_2dfhiz)
        }

        # Read convolution matrix for the used overlaps from files and push to Fortran
        name_to_number = {'cmass': 1, 'lowz': 2, '2dfloz': 3, '2dfhiz': 4}
        for key, value in self.mp_overlaps.iteritems():
            if key == 'lowz':
                fname_from_dataset = self.LOWZ_overlap_conv
            elif key == 'cmass':
                fname_from_dataset = self.CMASS_overlap_conv
            elif key == '2dfloz':
                fname_from_dataset = self.twodfloz_overlap_conv
            elif key == '2dfhiz':
                fname_from_dataset = self.twodfhiz_overlap_conv
            fname_from_dataset = fname_from_dataset.strip(r'%DATASETDIR%')
            fname = os.path.join(self.data_directory, fname_from_dataset)
            value.ReadConvolutionMatrix(fname)
            # Push data pointers to Fortran
            intParams = np.array(
                [value.k_num_obs, value.size_convolution, value.k_num_conv],
                dtype='int32')
            realParams = np.array([
                value.z_eff, value.k_min_theory, value.dk_theory,
                value.k_spacing_obs, value.k_min_obs
            ])
            set_mp_overlap(value.ConvolutionMatrix, value.size_convolution,
                           intParams, realParams, name_to_number[key])

        # Set the logk and z arrays which will be used in the power spectrum interpolation.
        self.Nk = 203
        self.Nz = 38
        self.logkh_for_pk = np.linspace(-4, 2.2, self.Nk)
        self.kh_for_pk = 10**self.logkh_for_pk
        self.z_for_pk = np.linspace(0, 4.1, self.Nz)

        # Push some fields of the Python class to the corresponding derived type in Fortran, called 'this':
        intParams = np.array([
            self.size_cov, self.size_covmask, self.klinesum, self.set_scenario,
            self.size_covallmask
        ],
                             dtype='int32')
        logParams = np.array([
            self.use_morell, self.use_rombint, self.use_conservative,
            self.use_cmass_overlap, self.use_lowz_overlap,
            self.use_2dfloz_overlap, self.use_2dfhiz_overlap
        ],
                             dtype='int32')

        set_this(self.lens_redshifts['lowz'], self.lens_redshifts['2dfloz'],
                 self.lens_redshifts['cmass'], self.lens_redshifts['2dfhiz'],
                 self.xipm, self.invcovxipm, self.sizcov,
                 self.ellgentestarrini, self.maskelements,
                 len(self.maskelements), self.bes0arr, self.bes4arr,
                 self.bes2arr, intParams, logParams)
Example #19
    def loglkl(self, cosmo, data):

        # Recover Cl_s from CLASS, which is a dictionary, with the method
        # get_cl from the Likelihood class, because it already makes the
        # conversion to uK^2.
        dict_Cls = self.get_cl(cosmo, self.l_max)

        # Convert the dict to the same format expected by BICEP
        # that is:
        # 0: TT
        # 1: TE
        # 2: EE
        # 3: BB
        # 6: ET, and the rest to 0 (must be an array of width 9)
        # Warnings: BICEP2 expects l*(l+1)*Cl/(2*pi) in units of uK^2
        cosmo_Cls = np.zeros((self.l_max + 1, 9))
        ell = np.arange(self.l_max + 1)

        cosmo_Cls[:, 0] = ell * (ell + 1) * dict_Cls['tt'] / (2 * math.pi)
        cosmo_Cls[:, 1] = ell * (ell + 1) * dict_Cls['te'] / (2 * math.pi)
        cosmo_Cls[:, 2] = ell * (ell + 1) * dict_Cls['ee'] / (2 * math.pi)
        cosmo_Cls[:, 3] = ell * (ell + 1) * dict_Cls['bb'] / (2 * math.pi)
        cosmo_Cls[:, 6] = ell * (ell + 1) * dict_Cls['te'] / (2 * math.pi)

        # Now loop over all the fields
        loglkl = 0
        for index, field in enumerate(self.fields):
            # Get the expectation value of the data considering this theoretical
            # model, for all the required fields.
            expectation_value = bu.calc_expvals(ell, cosmo_Cls,
                                                self.bpwf_l[index],
                                                self.bpwf_Cs_l[index])

            # Fill the C_l matrix
            if field == "T":
                self.C_l[index][:, 0, 0] = expectation_value[:, 0]
            elif field == 'E':
                self.C_l[index][:, 0, 0] = expectation_value[:, 2]
            elif field == 'B':
                self.C_l[index][:, 0, 0] = expectation_value[:, 3]
            elif field == "EB":
                self.C_l[index][:, 0, 0] = expectation_value[:, 2]
                self.C_l[index][:, 0, 1] = expectation_value[:, 5]
                self.C_l[index][:, 1, 0] = expectation_value[:, 5]
                self.C_l[index][:, 1, 1] = expectation_value[:, 3]
            elif field == "TB":
                self.C_l[index][:, 0, 0] = expectation_value[:, 0]
                self.C_l[index][:, 0, 1] = expectation_value[:, 4]
                self.C_l[index][:, 1, 0] = expectation_value[:, 5]
                self.C_l[index][:, 1, 1] = expectation_value[:, 3]
            elif field == "TE":
                self.C_l[index][:, 0, 0] = expectation_value[:, 0]
                self.C_l[index][:, 0, 1] = expectation_value[:, 1]
                self.C_l[index][:, 1, 0] = expectation_value[:, 1]
                self.C_l[index][:, 1, 1] = expectation_value[:, 2]
            else:
                raise io_mp.LikelihoodError(
                    "BICEP2 requires a field to compute the likelihood"
                    " which should so far be T, E or B, but was read to"
                    " be '%s'" % field)

            # Add the noise
            self.C_l[index] += self.N_l[index]

            # Actually compute the likelihood
            loglkl += bu.evaluateLikelihood(self.C_l[index],
                                            self.C_l_hat[index],
                                            self.C_fl[index],
                                            self.M_inv[index])

        return loglkl
Example #20
    def __init__(self, path, data, command_line):
        # Unusual construction, since the data files are not distributed
        # alongside BK14 (size problems)
        try:
            # Read the .dataset file specifying the data.
            super(BK14, self).__init__(path, data, command_line)
        except IOError:
            raise io_mp.LikelihoodError(
                "The BK14 data files were not found. Please download the "
                "following link "
                "http://bicepkeck.org/BK14_datarelease/BK14_cosmomc.tgz"
                ", extract it, and copy the BK14 folder inside"
                "`BK14_cosmomc/data/` to `your_montepython/data/`")

        # Require tensor modes from CLASS as well as nonlinear lensing.
        # Nonlinearities enhance the B-mode power spectrum by more than 6%
        # at l>100. (Even more at l>2000, but not relevant to BICEP.)
        # See http://arxiv.org/abs/astro-ph/0601594.
        arguments = {
            'output': 'tCl pCl lCl',
            'lensing': 'yes',
            'modes': 's, t',
            'l_max_scalars': 2000,
            'k_max_tau0_over_l_max': 7.0,
            'non linear': 'HALOFIT' if self.do_nonlinear else '',
            'accurate_lensing': 1,
            'l_max_tensors': self.cl_lmax
        }
        self.need_cosmo_arguments(data, arguments)

        map_names_used = self.map_names_used.split()
        map_fields = self.map_fields.split()
        map_names = self.map_names.split()
        self.map_fields_used = [
            maptype for i, maptype in enumerate(map_fields)
            if map_names[i] in map_names_used
        ]

        nmaps = len(map_names_used)
        ncrossmaps = nmaps * (nmaps + 1) // 2
        nbins = int(self.nbins)

        ## This constructs a different flattening of triangular matrices.
        ## v = [m for n in range(nmaps) for m in range(n,nmaps)]
        ## w = [m for n in range(nmaps) for m in range(nmaps-n)]
        ## # Store the indices in a tuple of integer arrays for later use.
        ## self.flat_to_diag = (np.array(v),np.array(w))

        # We choose the tril_indices layout for flat indexing of the triangular matrix
        self.flat_to_diag = np.tril_indices(nmaps)
        self.diag_to_flat = np.zeros((nmaps, nmaps), dtype='int')
        # It is now easy to generate an array with the corresponding flattened indices. (We only fill the lower triangular part.)
        self.diag_to_flat[self.flat_to_diag] = list(range(ncrossmaps))

        # Read in bandpasses
        self.ReadBandpasses()

        # Read window bins
        self.window_data = np.zeros(
            (int(self.nbins), int(self.cl_lmax), ncrossmaps))
        # Retrieve mask and index permutation of windows:
        indices, mask = self.GetIndicesAndMask(
            self.bin_window_in_order.split())
        for k in range(nbins):
            windowfile = os.path.join(
                self.data_directory,
                self.bin_window_files.replace('%u', str(k + 1)))
            tmp = pd.read_table(windowfile,
                                comment='#',
                                sep=' ',
                                header=None,
                                index_col=0).as_matrix()
            # Apply mask
            tmp = tmp[:, mask]
            # Permute columns and store this bin
            self.window_data[k][:, indices] = tmp
        # print('window_data',self.window_data.shape)

        #Read covmat fiducial
        # Retrieve mask and index permutation for a single bin.
        indices, mask = self.GetIndicesAndMask(self.covmat_cl.split())
        # Extend mask and indices. Mask just need to be copied, indices needs to be increased:
        superindices = []
        supermask = []
        for k in range(nbins):
            superindices += [idx + k * ncrossmaps for idx in indices]
            supermask += list(mask)
        supermask = np.array(supermask)

        tmp = pd.read_table(os.path.join(self.data_directory,
                                         self.covmat_fiducial),
                            comment='#',
                            sep=' ',
                            header=None,
                            skipinitialspace=True).as_matrix()
        # Apply mask:
        tmp = tmp[:, supermask][supermask, :]
        print('Covmat read with shape', tmp.shape)
        # Store covmat in correct order
        self.covmat = np.zeros((nbins * ncrossmaps, nbins * ncrossmaps))
        for index_tmp, index_covmat in enumerate(superindices):
            self.covmat[index_covmat, superindices] = tmp[index_tmp, :]

        #Compute inverse and store
        self.covmat_inverse = la.inv(self.covmat)
        # print('covmat',self.covmat.shape)
        # print(self.covmat_inverse)

        nbins = int(self.nbins)
        # Read noise:
        self.cl_noise_matrix = self.ReadMatrix(self.cl_noise_file,
                                               self.cl_noise_order)

        # Read Chat and perhaps add noise:
        self.cl_hat_matrix = self.ReadMatrix(self.cl_hat_file,
                                             self.cl_hat_order)
        if not self.cl_hat_includes_noise:
            for k in range(nbins):
                self.cl_hat_matrix[k] += self.cl_noise_matrix[k]

        # Read cl_fiducial and perhaps add noise:
        self.cl_fiducial_sqrt_matrix = self.ReadMatrix(self.cl_fiducial_file,
                                                       self.cl_fiducial_order)
        if not self.cl_fiducial_includes_noise:
            for k in range(nbins):
                self.cl_fiducial_sqrt_matrix[k] += self.cl_noise_matrix[k]
        # Now take matrix square root:
        for k in range(nbins):
            self.cl_fiducial_sqrt_matrix[k] = la.sqrtm(
                self.cl_fiducial_sqrt_matrix[k])
Example #21
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        if "parthenope" in self.usedata:
          # Read grid 'parth' from Parthenope (omegab,DeltaN,YpBBN,DH) and prepare interpolation
          parth_data = np.loadtxt(os.path.join(self.data_directory, self.parthenopefile),usecols=(0,2,4,6))
          omegab_size_parth = 48
          deltaneff_size_parth = 11
          if(len(parth_data) != omegab_size_parth*deltaneff_size_parth):
            raise io_mp.LikelihoodError(
                "In likelihood %s: " % self.name +
                "BBN data from file '%s' " % os.path.join(self.data_directory, self.parthenopefile) +
                "in incorrect format.")

          # for omega_b, take first omegab_size points in 1st column
          omegab_parth_data = np.array(parth_data[:omegab_size_parth,0])
          # for deltaneff, take one point every omegab_size points in 2nd column
          deltaneff_parth_data = np.array(parth_data[::omegab_size_parth,1])
          ypbbn_parth_data = np.array(parth_data[:,2]).reshape(deltaneff_size_parth,omegab_size_parth)
          dh_parth_data = 1.e5*np.array(parth_data[:,3]).reshape(deltaneff_size_parth,omegab_size_parth)
          YpBBN_parth = interpolate.interp2d(omegab_parth_data,deltaneff_parth_data,ypbbn_parth_data,kind='cubic')
          DH_parth = interpolate.interp2d(omegab_parth_data,deltaneff_parth_data,dh_parth_data,kind='cubic')
          # ????
          dh_parth_DeltaN0 = 1.e5*np.array(parth_data[3*omegab_size_parth:4*omegab_size_parth,3])
          Omegab_parth = interpolate.interp1d(dh_parth_DeltaN0,omegab_parth_data,kind='cubic')
          #print YpBBN_parth(0.022,0.)
          #print DH_parth(0.022,0.)
          self.get_Yp = YpBBN_parth
          self.get_DH = DH_parth

          self.get_Yperr = (lambda x,y: 0.5*self.two_sig_neutron_lifetime)
          self.get_DHerr = (lambda x,y: 0.5*self.two_sig_dh_parth)

          self.omegab_bounds = [omegab_parth_data[0],omegab_parth_data[-1]]
          self.dNeff_bounds = [deltaneff_parth_data[0],deltaneff_parth_data[-1]]

        elif "marcucci" in self.usedata:
          # Read grid 'marcucci' from Parthenope (omegab,DeltaN,YpBBN,DH) and prepare interpolation
          marcucci_data = np.loadtxt(os.path.join(self.data_directory, self.marcuccifile),usecols=(0,2,4,6))
          omegab_size_marcucci = 48
          deltaneff_size_marcucci = 11
          if(len(marcucci_data) != omegab_size_marcucci*deltaneff_size_marcucci):
            raise io_mp.LikelihoodError(
                "In likelihood %s: " % self.name +
                "BBN data from file '%s' " % os.path.join(self.data_directory, self.marcuccifile) +
                "in incorrect format.")
          # for omega_b, take first omegab_size_marcucci points in 1st column
          omegab_marcucci_data = np.array(marcucci_data[:omegab_size_marcucci,0])
          # for deltaneff, take one point every omegab_size_marcucci points in 2nd column
          deltaneff_marcucci_data = np.array(marcucci_data[::omegab_size_marcucci,1])
          ypbbn_marcucci_data = np.array(marcucci_data[:,2]).reshape(deltaneff_size_marcucci,omegab_size_marcucci)
          dh_marcucci_data = 1.e5*np.array(marcucci_data[:,3]).reshape(deltaneff_size_marcucci,omegab_size_marcucci)
          YpBBN_marcucci = interpolate.interp2d(omegab_marcucci_data,deltaneff_marcucci_data,ypbbn_marcucci_data,kind='cubic')
          DH_marcucci = interpolate.interp2d(omegab_marcucci_data,deltaneff_marcucci_data,dh_marcucci_data,kind='cubic')
          # ????
          dh_marcucci_DeltaN0 = 1.e5*np.array(marcucci_data[3*omegab_size_marcucci:4*omegab_size_marcucci,3])
          Omegab_marcucci = interpolate.interp1d(dh_marcucci_DeltaN0,omegab_marcucci_data,kind='cubic')
          #print YpBBN_marcucci(0.022,0.)
          #print DH_marcucci(0.022,0.)
          self.get_Yp = YpBBN_marcucci
          self.get_DH = DH_marcucci

          self.get_Yperr = (lambda x,y: 0.5*self.two_sig_neutron_lifetime)
          self.get_DHerr = (lambda x,y: 0.5*self.two_sig_dh_marcucci)

          self.omegab_bounds = [omegab_marcucci_data[0],omegab_marcucci_data[-1]]
          self.dNeff_bounds = [deltaneff_marcucci_data[0],deltaneff_marcucci_data[-1]]

        elif "primat" in self.usedata:
          # Read grid 'primat' from PRIMAT (omegab,DeltaN,YpBBN,DH) and prepare interpolation
          primat_data = np.loadtxt(os.path.join(self.data_directory, self.primatfile),usecols=(0,2,4,5,6,7))
          omegab_size_primat = 52
          deltaneff_size_primat = 25
          if(len(primat_data) != omegab_size_primat*deltaneff_size_primat):
            raise io_mp.LikelihoodError(
                "In likelihood %s: " % self.name +
                "BBN data from file '%s' " % os.path.join(self.data_directory, self.primatfile) +
                "in incorrect format.")
          # for omega_b, take first omegab_size_primat points in 1st column
          omegab_primat_data = np.array(primat_data[:omegab_size_primat,0])
          # for deltaneff, take one point every omegab_size_primat points in 2nd column
          deltaneff_primat_data = np.array(primat_data[::omegab_size_primat,1])
          ypbbn_primat_data = np.array(primat_data[:,2]).reshape(deltaneff_size_primat,omegab_size_primat)
          dh_primat_data = 1.e5*np.array(primat_data[:,4]).reshape(deltaneff_size_primat,omegab_size_primat)
          YpBBN_primat = interpolate.interp2d(omegab_primat_data,deltaneff_primat_data,ypbbn_primat_data,kind='cubic')
          DH_primat = interpolate.interp2d(omegab_primat_data,deltaneff_primat_data,dh_primat_data,kind='cubic')
          # Primat stores also the errors of its estimation
          sigma_ypbbn_primat_data = np.array(primat_data[:,3]).reshape(deltaneff_size_primat,omegab_size_primat)
          sigma_dh_primat_data = 1.e5*np.array(primat_data[:,5]).reshape(deltaneff_size_primat,omegab_size_primat)
          sigma_YpBBN_primat = interpolate.interp2d(omegab_primat_data,deltaneff_primat_data,sigma_ypbbn_primat_data,kind='cubic')
          sigma_DH_primat = interpolate.interp2d(omegab_primat_data,deltaneff_primat_data,sigma_dh_primat_data,kind='cubic')
          # invert the D/H(omega_b) relation on the grid slice taken to correspond to DeltaNeff = 0
          dh_primat_DeltaN0 = 1.e5*np.array(primat_data[3*omegab_size_primat:4*omegab_size_primat,4])
          Omegab_primat = interpolate.interp1d(dh_primat_DeltaN0,omegab_primat_data,kind='cubic')
          #print YpBBN_primat(0.022,0.)
          #print DH_primat(0.022,0.)
          self.get_Yp = YpBBN_primat
          self.get_DH = DH_primat

          self.get_Yperr = sigma_YpBBN_primat
          self.get_DHerr = sigma_DH_primat

          self.omegab_bounds = [omegab_primat_data[0],omegab_primat_data[-1]]
          self.dNeff_bounds = [deltaneff_primat_data[0],deltaneff_primat_data[-1]]

        else:
            raise io_mp.LikelihoodError(
                "In likelihood %s: " % self.name +
                "unrecognized 'usedata' option %s." % self.usedata)

        if not (("dh" in self.include_bbn_type) or ("yp" in self.include_bbn_type)):
          raise io_mp.LikelihoodError(
              "In likelihood %s: " % self.name +
              "include_bbn_type ('%s') has to include either 'dh' or 'yp'." % self.include_bbn_type)
Example #22
0
    def loglkl(self, cosmo, data):

      omega_b = cosmo.omega_b()
      dNeff = cosmo.Neff()-self.Neff0

      # TODO: ideally, we would want to get N_eff from a call to CLASS at BBN time, not like this

      if(omega_b < self.omegab_bounds[0]):
        raise io_mp.LikelihoodError("The value of omega_b = %e was below the BBN table. Aborting."%omega_b)
      if(omega_b > self.omegab_bounds[1]):
        raise io_mp.LikelihoodError("The value of omega_b = %e was above the BBN table. Aborting."%omega_b)

      if(dNeff < self.dNeff_bounds[0]):
        raise io_mp.LikelihoodError("The value of delta Neff = %e was below the BBN table. Aborting."%dNeff)
      if(dNeff > self.dNeff_bounds[1]):
        raise io_mp.LikelihoodError("The value of delta Neff = %e was above the BBN table. Aborting."%dNeff)

      yp = self.get_Yp(omega_b,dNeff)
      dh = self.get_DH(omega_b,dNeff)
      yperr = self.get_Yperr(omega_b,dNeff)
      dherr = self.get_DHerr(omega_b,dNeff)

      # the 2d interpolators return length-1 arrays at a single point; reduce them to scalars
      try:
        if len(yp)>0:
          yp = yp[0]
        if len(dh)>0:
          dh = dh[0]
        if len(yperr)>0:
          yperr = yperr[0]
        if len(dherr)>0:
          dherr = dherr[0]
      except TypeError:
        # already scalars (e.g. the constant-error lambdas in the 'marcucci' case)
        pass
      #print("From (omega_b,N_eff): Theoretical : Y_p = {:.5g} \pm {:.5g} , D_H = {:.5g} \pm {:.5g}".format(yp,yperr,dh,dherr))

      chi_square = 0.
      #Deal with deuterium
      if "dh" in self.include_bbn_type:
        chisquare_dh = (dh - self.dh_cooke_mean)**2/(self.dh_cooke_one_sig**2+dherr**2)
        chi_square += chisquare_dh
        #print("Chi square DH = ",chisquare_dh)

      #Deal with helium
      if "yp" in self.include_bbn_type:
        try:
          if 'cooke' in self.yp_measurement_type:
            if(yp>self.yp_cooke_mean):
              chisquare_yp = (yp - self.yp_cooke_mean)**2/(self.yp_cooke_one_sig_p**2+yperr**2)
            else:
              chisquare_yp = (yp - self.yp_cooke_mean)**2/(self.yp_cooke_one_sig_m**2+yperr**2)
          elif ('aver' in self.yp_measurement_type):
            chisquare_yp = (yp-self.yp_means['aver2015'])**2/(self.yp_sigs['aver2015']**2+yperr**2)
          elif ('peimbert' in self.yp_measurement_type):
            chisquare_yp = (yp-self.yp_means['peimbert2016'])**2/(self.yp_sigs['peimbert2016']**2+yperr**2)
          elif ('izotov' in self.yp_measurement_type):
            chisquare_yp = (yp-self.yp_means['izotov2014'])**2/(self.yp_sigs['izotov2014']**2+yperr**2)
          else:
            raise io_mp.LikelihoodError("Unrecognized experimental value of yp")
        except KeyError:
          raise io_mp.LikelihoodError("Unrecognized experimental value of yp")
        chi_square += chisquare_yp
        #print("Chi square YP = ",chisquare_yp)

      return -0.5*chi_square
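
For reference, a condensed self-contained sketch of the chi-square combination computed in this loglkl: a Gaussian term for D/H and an asymmetric Gaussian for Yp, with the theoretical errors added in quadrature. The measurement numbers below are placeholders, not the values read by the likelihood:

# placeholder measurement values, for illustration only
def bbn_chi2(yp, dh, yperr, dherr,
             dh_mean=2.5e-5, dh_sig=0.03e-5,
             yp_mean=0.245, yp_sig_plus=0.004, yp_sig_minus=0.003):
    # D/H: symmetric Gaussian, theory error added in quadrature
    chi2 = (dh - dh_mean)**2 / (dh_sig**2 + dherr**2)
    # Yp: asymmetric Gaussian, pick the upper or lower error bar
    yp_sig = yp_sig_plus if yp > yp_mean else yp_sig_minus
    chi2 += (yp - yp_mean)**2 / (yp_sig**2 + yperr**2)
    return chi2

print(-0.5 * bbn_chi2(yp=0.2470, dh=2.52e-5, yperr=3.e-4, dherr=2.e-7))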
Example #23
0
    def loglkl(self, cosmo, data):

        # Write fiducial model values if needed (in that case, return an
        # imaginary number to signal it); otherwise compute chi2 and proceed

        # If writing fiducial model is needed: read sensitivity (relative errors)
        if self.fid_values_exist is False:

            # open file where fiducial model will be written and write header
            fid_file = open(
                os.path.join(self.data_directory, self.fiducial_file), 'w')
            fid_file.write('# Fiducial parameters')
            for key, value in io_mp.dictitems(data.mcmc_parameters):
                fid_file.write(', %s = %.5g' %
                               (key, value['current'] * value['scale']))
            fid_file.write('\n')

            # open sensitivity file and read relative errors
            if os.path.exists(
                    os.path.join(self.data_directory, self.sensitivity)):

                sensitivity = np.loadtxt(
                    os.path.join(self.data_directory, self.sensitivity))
                self.num_points = np.shape(sensitivity)[0]

                relative_error = np.array([], 'float64')
                relative_invcov11 = np.array([], 'float64')
                relative_invcov22 = np.array([], 'float64')
                relative_invcov12 = np.array([], 'float64')

                for i in range(self.num_points):
                    self.z = np.append(self.z, sensitivity[i, 0])
                    self.type = np.append(self.type, self.error_type)
                    if self.type[i] != 8:
                        relative_error = np.append(
                            relative_error,
                            0.01 * sensitivity[i, self.error_column])
                    else:
                        relative_invcov11 = np.append(relative_invcov11,
                                                      sensitivity[i, 1])
                        relative_invcov22 = np.append(relative_invcov22,
                                                      sensitivity[i, 2])
                        relative_invcov12 = np.append(relative_invcov12,
                                                      sensitivity[i, 3])
            else:
                raise io_mp.LikelihoodError("Could not find file ",
                                            self.sensitivity)

        # in all cases: initialise chi2 and compute observables:
        # angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # Hubble parameter in km/s/Mpc
        chi2 = 0.
        for i in range(self.num_points):

            da = cosmo.angular_distance(self.z[i])
            dr = self.z[i] / cosmo.Hubble(self.z[i])
            dv = pow(da * da * (1 + self.z[i]) * (1 + self.z[i]) * dr, 1. / 3.)
            rs = cosmo.rs_drag()
            Hz = cosmo.Hubble(self.z[i]) * conts.c / 1000.0

            if self.type[i] == 3:
                theo = dv / rs

            elif self.type[i] == 4:
                theo = dv

            elif self.type[i] == 5:
                theo = da / rs

            elif self.type[i] == 6:
                theo = 1. / cosmo.Hubble(self.z[i]) / rs

            elif self.type[i] == 7:
                theo = rs / dv

            elif self.type[i] == 8:
                theo1 = Hz * rs
                theo2 = da / rs

            else:
                raise io_mp.LikelihoodError("In likelihood %s. " % self.name +
                                            "BAO data type %s " %
                                            self.type[i] +
                                            "in %d-th line not understood" % i)

            # if the fiducial model already exists: compute chi2
            if self.fid_values_exist is True:
                if self.type[i] != 8:
                    chi2 += ((theo - self.data[i]) / self.error[i])**2
                else:
                    chi2 += self.invcov11[i] * pow(
                        theo1 - self.data1[i], 2) + self.invcov22[i] * pow(
                            theo2 - self.data2[i], 2
                        ) + self.invcov12[i] * 2. * (theo1 - self.data1[i]) * (
                            theo2 - self.data2[i])

            # if the fiducial model does not exist: write fiducial model
            else:
                if self.type[i] != 8:
                    sigma = theo * relative_error[i]
                    fid_file.write(self.nickname)
                    fid_file.write("   %.8g  %.8g  %.8g %5d \n" %
                                   (self.z[i], theo, sigma, self.type[i]))
                else:
                    invcovmat11 = relative_invcov11[i] / theo1 / theo1
                    # debug: print(i, relative_invcov11[i], invcovmat11)
                    invcovmat22 = relative_invcov22[i] / theo2 / theo2
                    invcovmat12 = relative_invcov12[i] / theo1 / theo2
                    fid_file.write(self.nickname)
                    fid_file.write(
                        "   %7.8g  %16.8g  %16.8g  %16.8e  %16.8e  %16.8e  %5d\n"
                        % (self.z[i], theo1, theo2, invcovmat11, invcovmat22,
                           invcovmat12, self.type[i]))

        # Exit after writing fiducial file
        # (return an imaginary number to let the sampler know that fiducial models were just created)
        if self.fid_values_exist is False:
            print('\n')
            warnings.warn(
                "Writing fiducial model in %s, for %s likelihood\n" %
                (self.data_directory + '/' + self.fiducial_file, self.name))
            return 1j

        # Otherwise, exit normally, returning ln(L)
        lkl = -0.5 * chi2
        return lkl
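
To make the type-8 (anisotropic) term in the loop above more transparent, here is a small standalone sketch that writes the same quadratic form with an explicit 2x2 inverse covariance; all input numbers are illustrative placeholders:

import numpy as np

# chi2 = d^T C^{-1} d with d = (H*rs - data1, D_A/rs - data2), equivalent to the
# explicit invcov11/invcov22/invcov12 expression used in the likelihood
def aniso_chi2(theo1, theo2, data1, data2, invcov11, invcov22, invcov12):
    d = np.array([theo1 - data1, theo2 - data2])
    invcov = np.array([[invcov11, invcov12],
                       [invcov12, invcov22]])
    return float(d @ invcov @ d)

# illustrative placeholder inputs
print(aniso_chi2(theo1=101.0, theo2=9.1, data1=100.0, data2=9.0,
                 invcov11=0.5, invcov22=20.0, invcov12=0.1))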