Example #1
def extend_chain(data, cosmo, command_line, target_folder, chain_name,
                 new_derived):
    """
    Read the input chain point by point, computing the new derived values

    """
    input_path = os.path.join(command_line.folder, chain_name)
    output_path = os.path.join(target_folder, chain_name)
    print(' -> reading ', input_path)
    # Put in parameter_names all the varying parameters, plus the derived ones
    # that are not part of new_derived
    parameter_names = data.get_mcmc_parameters(['varying'])
    parameter_names.extend([
        elem for elem in data.get_mcmc_parameters(['derived'])
        if elem not in new_derived
    ])
    with open(input_path, 'r') as input_chain:
        with open(output_path, 'w') as output_chain:
            for line in input_chain:
                if line[0] == '#':
                    output_chain.write(line)
                    continue
                params = line.split()
                # recover the likelihood of this point
                loglike = -float(params[1])
                N = int(params[0])
                # Assign all the recovered values to the data structure
                for index, param in enumerate(parameter_names):
                    data.mcmc_parameters[param]['current'] = \
                        float(params[2+index])
                # Compute the cosmology
                data.update_cosmo_arguments()
                if cosmo.state:
                    cosmo.struct_cleanup()
                cosmo.set(data.cosmo_arguments)
                try:
                    cosmo.compute(["lensing"])
                except CosmoComputationError:
                    pass
                # Recover all the derived parameters
                derived = cosmo.get_current_derived_parameters(
                    data.get_mcmc_parameters(['derived']))
                for name, value in dictitems(derived):
                    data.mcmc_parameters[name]['current'] = \
                        value / data.mcmc_parameters[name]['scale']
                # Accept the point
                sampler.accept_step(data)
                io_mp.print_vector([output_chain], N, loglike, data)
    print(output_path, 'written')
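# A minimal usage sketch for the function above (hedged: the folder layout and
# the extra derived parameter 'sigma8' are illustrative placeholders, and the
# MontePython objects are assumed to be initialised by the usual machinery):
#
#     for chain_name in os.listdir(command_line.folder):
#         if chain_name.endswith('.txt'):
#             extend_chain(data, cosmo, command_line, 'chains/extended',
#                          chain_name, new_derived=['sigma8'])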
Example #2
def recover_local_path(command_line):
    """
    Read the configuration file, filling a dictionary

    Returns
    -------
    path : dict
        contains the absolute path to the location of the code, the data, the
        cosmological code, and potential likelihood codes (clik for Planck,
        etc)
    """
    # Define the dictionary that will hold the local configuration
    path = {}

    # The path is recovered by taking the path to this file (MontePython.py).
    # By default, then, the data folder is located in the same root directory.
    # Any setting in the configuration file will overwrite this one.
    path['root'] = os.path.sep.join(
        os.path.abspath(__file__).split(os.path.sep)[:-2])
    path['MontePython'] = os.path.join(path['root'], 'montepython')
    path['data'] = os.path.join(path['root'], 'data')

    # the rest is important only when running the MCMC chains
    if command_line.subparser_name == 'run':
        # Configuration file, defaulting to default.conf in your root
        # directory.  This can be changed with the command line option --conf.
        # All changes will be stored into the log.param of your folder, and
        # hence will be reused for a subsequent run in the same directory
        conf_file = os.path.abspath(command_line.config_file)
        if os.path.isfile(conf_file):
            with open(conf_file) as conf:
                for line in conf:
                    exec(line)
            for key, value in dictitems(path):
                path[key] = os.path.normpath(os.path.expanduser(value))
        else:
            # The error is ignored when reading from a log.param, because the
            # configuration is already stored there
            if command_line.param.find('log.param') == -1:
                raise io_mp.ConfigurationError(
                    "You must provide a valid .conf file (I tried to read "
                    "%s) " % os.path.abspath(command_line.config_file) +
                    "that specifies the correct locations for your data "
                    "folder, Class, (Clik), etc...")

    return path
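# For reference, the configuration file read above is executed line by line as
# plain Python, so it simply assigns entries of the 'path' dictionary. A
# minimal sketch of such a file (the locations are illustrative placeholders):
#
#     path['cosmo'] = '~/codes/class'
#     path['clik'] = '~/codes/planck/code/plc-2.0'
#
# Every entry is then expanded and normalised with os.path.expanduser and
# os.path.normpath in the loop above.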
Example #3
    def loglkl(self, cosmo, data):

        #start = time.time()

        # We want to obtain here the relation between z and r; this is done
        # via the cosmological module's function z_of_r
        self.r = np.zeros(self.nzmax, 'float64')
        self.dzdr = np.zeros(self.nzmax, 'float64')

        self.r, self.dzdr = cosmo.z_of_r(self.z)

        # Compute now the selection function eta(r) = eta(z) dz/dr normalized
        # to one. The np.newaxis helps to broadcast the one-dimensional array
        # dzdr to the proper shape. Note that eta_norm is also broadcasted as
        # an array of the same shape as eta_z
        self.eta_r = self.eta_z * (self.dzdr[:, np.newaxis] / self.eta_norm)

        # Compute function g_i(r), that depends on r and the bin
        # g_i(r) = 2r(1+z(r)) int_r^+\infty drs eta_r(rs) (rs-r)/rs
        # The integration starts from r.
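        # The integral below is evaluated with the trapezoidal rule on the
        # non-uniform r grid (hence the 0.5 * (fun[1:] + fun[:-1]) pattern)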
        g = np.zeros((self.nzmax, self.nbin), 'float64')
        for Bin in xrange(self.nbin):
            for nr in xrange(1, self.nzmax - 1):
                fun = self.eta_r[nr:, Bin] * (self.r[nr:] -
                                              self.r[nr]) / self.r[nr:]
                g[nr, Bin] = np.sum(0.5 * (fun[1:] + fun[:-1]) *
                                    (self.r[nr + 1:] - self.r[nr:-1]))
                g[nr, Bin] *= 2. * self.r[nr] * (1. + self.z[nr])

        # compute the maximum l where most contributions are linear
        # as a function of the lower bin number
        if self.use_lmax_lincut:
            lintegrand_lincut_o = np.zeros((self.nzmax, self.nbin, self.nbin),
                                           'float64')
            lintegrand_lincut_u = np.zeros((self.nzmax, self.nbin, self.nbin),
                                           'float64')
            l_lincut = np.zeros((self.nbin, self.nbin), 'float64')
            l_lincut_mean = np.zeros(self.nbin, 'float64')
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    lintegrand_lincut_o[
                        1:, Bin1,
                        Bin2] = g[1:, Bin1] * g[1:, Bin2] / (self.r[1:])
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    lintegrand_lincut_u[
                        1:, Bin1,
                        Bin2] = g[1:, Bin1] * g[1:, Bin2] / (self.r[1:]**2)
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    l_lincut[Bin1, Bin2] = np.sum(
                        0.5 * (lintegrand_lincut_o[1:, Bin1, Bin2] +
                               lintegrand_lincut_o[:-1, Bin1, Bin2]) *
                        (self.r[1:] - self.r[:-1]))
                    l_lincut[Bin1, Bin2] /= np.sum(
                        0.5 * (lintegrand_lincut_u[1:, Bin1, Bin2] +
                               lintegrand_lincut_u[:-1, Bin1, Bin2]) *
                        (self.r[1:] - self.r[:-1]))
            z_peak = np.zeros((self.nbin, self.nbin), 'float64')
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    z_peak[Bin1, Bin2] = self.zmax
                    for index_z in xrange(self.nzmax):
                        if (self.r[index_z] > l_lincut[Bin1, Bin2]):
                            z_peak[Bin1, Bin2] = self.z[index_z]
                            break
                    if self.use_zscaling:
                        l_lincut[Bin1,
                                 Bin2] *= self.kmax_hMpc * cosmo.h() * pow(
                                     1. + z_peak[Bin1, Bin2], 2. /
                                     (2. + cosmo.n_s()))
                    else:
                        l_lincut[Bin1, Bin2] *= self.kmax_hMpc * cosmo.h()
                l_lincut_mean[Bin1] = np.sum(
                    l_lincut[Bin1, :]) / (self.nbin - Bin1)

            #for Bin1 in xrange(self.nbin):
            #for Bin2 in xrange(Bin1,self.nbin):
            #print("%s\t%s\t%s\t%s" % (Bin1, Bin2, l_lincut[Bin1, Bin2], l_lincut_mean[Bin1]))

        #for nr in xrange(1, self.nzmax-1):
        #    print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (self.z[nr], g[nr, 0], g[nr, 1], g[nr, 2], g[nr, 3], g[nr, 4], g[nr, 5], g[nr, 6], g[nr, 7], g[nr, 8], g[nr, 9]))

        # Get power spectrum P(k=l/r,z(r)) from cosmological module
        kmin_in_inv_Mpc = self.k_min_h_by_Mpc * cosmo.h()
        kmax_in_inv_Mpc = self.k_max_h_by_Mpc * cosmo.h()
        pk = np.zeros((self.nlmax, self.nzmax), 'float64')
        for index_l in xrange(self.nlmax):
            for index_z in xrange(1, self.nzmax):

                # These lines would return an error when you ask for P(k,z) out of computed range
                #        if (self.l[index_l]/self.r[index_z] > self.k_max):
                #            raise io_mp.LikelihoodError(
                #                "you should increase euclid_lensing.k_max up to at least %g" % (self.l[index_l]/self.r[index_z]))
                #        pk[index_l, index_z] = cosmo.pk(
                #            self.l[index_l]/self.r[index_z], self.z[index_z])

                # These lines set P(k,z) to zero out of [k_min, k_max] range
                k_in_inv_Mpc = self.l[index_l] / self.r[index_z]
                if (k_in_inv_Mpc < kmin_in_inv_Mpc) or (k_in_inv_Mpc >
                                                        kmax_in_inv_Mpc):
                    pk[index_l, index_z] = 0.
                else:
                    pk[index_l,
                       index_z] = cosmo.pk(self.l[index_l] / self.r[index_z],
                                           self.z[index_z])

                #print("%s\t%s\t%s" %(self.l[index_l], self.z[index_z], pk[index_l, index_z]))

        # Recover the non_linear scale computed by halofit. If halofit was not
        # used (nonlinear_method == 0), set the scale to a very large value so
        # the error envelope vanishes, and make sure that the nuisance
        # parameter epsilon is set to zero
        k_sigma = np.zeros(self.nzmax, 'float64')
        if (cosmo.nonlinear_method == 0):
            k_sigma[:] = 1.e6
        else:
            k_sigma = cosmo.nonlinear_scale(self.z, self.nzmax)

        # replace unphysical values of k_sigma
        if not (cosmo.nonlinear_method == 0):
            k_sigma_problem = False
            for index_z in xrange(self.nzmax - 1):
                if (k_sigma[index_z + 1] <
                        k_sigma[index_z]) or (k_sigma[index_z + 1] > 2.5):
                    k_sigma[index_z + 1] = 2.5
                    k_sigma_problem = True
                #print("%s\t%s" % (k_sigma[index_z], self.z[index_z]))
            if k_sigma_problem:
                warnings.warn(
                    "There were unphysical (decreasing in redshift or exploding) values of k_sigma (=cosmo.nonlinear_scale(...)). To proceed they were set to 2.5, the highest scale that seems to be stable."
                )

        # Define the alpha function that characterizes the theoretical
        # uncertainty. It is chosen to be 0.001 at low k and rises to
        # self.theoretical_error between k = 0.1 and 0.2
        alpha = np.zeros((self.nlmax, self.nzmax), 'float64')
        # self.theoretical_error = 0.1
        if self.theoretical_error != 0:
            #MArchi     for index_l in range(self.nlmax):
            #k = self.l[index_l]/self.r[1:]
            #alpha[index_l, 1:] = np.log(1.+k[:]/k_sigma[1:])/(
            #1.+np.log(1.+k[:]/k_sigma[1:]))*self.theoretical_error
            for index_l in xrange(self.nlmax):
                for index_z in xrange(1, self.nzmax):
                    k = self.l[index_l] / self.r[index_z]
                    alpha[index_l,
                          index_z] = np.log(1. + k / k_sigma[index_z]) / (
                              1. + np.log(1. + k / k_sigma[index_z])
                          ) * self.theoretical_error

        # recover the e_th_nu part of the error function
        e_th_nu = self.coefficient_f_nu * cosmo.Omega_nu / cosmo.Omega_m()

        # Compute the Error E_th_nu function
        if 'epsilon' in self.use_nuisance:
            E_th_nu = np.zeros((self.nlmax, self.nzmax), 'float64')
            for index_l in range(1, self.nlmax):
                E_th_nu[index_l, :] = np.log(
                    1. + self.l[index_l] / k_sigma[:] * self.r[:]) / (
                        1. + np.log(1. + self.l[index_l] / k_sigma[:] *
                                    self.r[:])) * e_th_nu

            # Add the error function, with the nuisance parameter, to P_nl_th,
            # if the nuisance parameter exists
            for index_l in range(self.nlmax):
                epsilon = data.mcmc_parameters['epsilon']['current'] * (
                    data.mcmc_parameters['epsilon']['scale'])
                pk[index_l, :] *= (1. + epsilon * E_th_nu[index_l, :])

        # Start loop over l for computation of C_l^shear
        Cl_integrand = np.zeros((self.nzmax, self.nbin, self.nbin), 'float64')
        Cl = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')
        # Start loop over l for computation of E_l
        if self.theoretical_error != 0:
            El_integrand = np.zeros((self.nzmax, self.nbin, self.nbin),
                                    'float64')
            El = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')

        for nl in xrange(self.nlmax):

            # find Cl_integrand = (g(r) / r)**2 * P(l/r,z(r))
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    Cl_integrand[1:, Bin1,
                                 Bin2] = g[1:, Bin1] * g[1:, Bin2] / (
                                     self.r[1:]**2) * pk[nl, 1:]
                    if self.theoretical_error != 0:
                        El_integrand[1:, Bin1, Bin2] = g[1:, Bin1] * (
                            g[1:, Bin2]) / (self.r[1:]**
                                            2) * pk[nl, 1:] * alpha[nl, 1:]

            # Integrate over r to get C_l^shear_ij = P_ij(l)
            # C_l^shear_ij = 9/16 Omega0_m^2 H_0^4 \sum_0^rmax dr (g_i(r)
            # g_j(r) /r**2) P(k=l/r,z(r))
            # It is then multiplied by 9/16*Omega_m**2 to be in units of Mpc**4
            # and then by (h/2997.9)**4 to be dimensionless
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    Cl[nl, Bin1,
                       Bin2] = np.sum(0.5 * (Cl_integrand[1:, Bin1, Bin2] +
                                             Cl_integrand[:-1, Bin1, Bin2]) *
                                      (self.r[1:] - self.r[:-1]))
                    Cl[nl, Bin1, Bin2] *= 9. / 16. * (cosmo.Omega_m())**2
                    Cl[nl, Bin1, Bin2] *= (cosmo.h() / 2997.9)**4

                    if self.theoretical_error != 0:
                        El[nl, Bin1, Bin2] = np.sum(
                            0.5 * (El_integrand[1:, Bin1, Bin2] +
                                   El_integrand[:-1, Bin1, Bin2]) *
                            (self.r[1:] - self.r[:-1]))
                        El[nl, Bin1, Bin2] *= 9. / 16. * (cosmo.Omega_m())**2
                        El[nl, Bin1, Bin2] *= (cosmo.h() / 2997.9)**4
                    if Bin1 == Bin2:
                        Cl[nl, Bin1, Bin2] += self.noise

        # Write fiducial model spectra if needed (exit in that case)
        if self.fid_values_exist is False:
            # Store the values now, and exit.
            fid_file_path = os.path.join(self.data_directory,
                                         self.fiducial_file)
            with open(fid_file_path, 'w') as fid_file:
                fid_file.write('# Fiducial parameters')
                for key, value in io_mp.dictitems(data.mcmc_parameters):
                    fid_file.write(', %s = %.5g' %
                                   (key, value['current'] * value['scale']))
                fid_file.write('\n')
                for nl in range(self.nlmax):
                    for Bin1 in range(self.nbin):
                        for Bin2 in range(self.nbin):
                            fid_file.write("%.8g\n" % Cl[nl, Bin1, Bin2])
            print('\n')
            warnings.warn(
                "Writing fiducial model in %s, for %s likelihood\n" %
                (self.data_directory + '/' + self.fiducial_file, self.name))
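            # (returning a complex value is the MontePython convention for
            # signalling that only the fiducial model was written on this call)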
            return 1j

        # Now that the fiducial model is stored, we add the El to both Cl and
        # Cl_fid (we create a new array, otherwise we would modify the
        # self.Cl_fid from one step to the other)

        # Spline Cl[nl,Bin1,Bin2] along l
        spline_Cl = np.empty((self.nbin, self.nbin), dtype=(list, 3))
        for Bin1 in xrange(self.nbin):
            for Bin2 in xrange(Bin1, self.nbin):
                spline_Cl[Bin1,
                          Bin2] = list(itp.splrep(self.l, Cl[:, Bin1, Bin2]))
                if Bin2 > Bin1:
                    spline_Cl[Bin2, Bin1] = spline_Cl[Bin1, Bin2]

        # Spline El[nl,Bin1,Bin2] along l
        if self.theoretical_error != 0:
            spline_El = np.empty((self.nbin, self.nbin), dtype=(list, 3))
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    spline_El[Bin1, Bin2] = list(
                        itp.splrep(self.l, El[:, Bin1, Bin2]))
                    if Bin2 > Bin1:
                        spline_El[Bin2, Bin1] = spline_El[Bin1, Bin2]

        # Spline Cl_fid[nl,Bin1,Bin2] along l
        spline_Cl_fid = np.empty((self.nbin, self.nbin), dtype=(list, 3))
        for Bin1 in xrange(self.nbin):
            for Bin2 in xrange(Bin1, self.nbin):
                spline_Cl_fid[Bin1, Bin2] = list(
                    itp.splrep(self.l, self.Cl_fid[:, Bin1, Bin2]))
                if Bin2 > Bin1:
                    spline_Cl_fid[Bin2, Bin1] = spline_Cl_fid[Bin1, Bin2]

        # Compute likelihood

        # Prepare interpolation for every integer value of l, from the array
        # self.l, to finally compute the likelihood (sum over all l's)
        dof = 1. / (int(self.l[-1]) - int(self.l[0]) + 1)
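        # (dof is 1 over the number of integer multipoles in the sum; it is
        # used below to weight the quadratic penalty on the theoretical-error
        # amplitude epsilon_l)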

        ells = range(int(self.l[0]), int(self.l[-1]) + 1)

        # Define cov theory, observ and error on the whole integer range of ell
        # values
        Cov_theory = np.zeros((len(ells), self.nbin, self.nbin), 'float64')
        Cov_observ = np.zeros((len(ells), self.nbin, self.nbin), 'float64')
        Cov_error = np.zeros((len(ells), self.nbin, self.nbin), 'float64')

        for Bin1 in xrange(self.nbin):
            for Bin2 in xrange(Bin1, self.nbin):
                Cov_theory[:, Bin1, Bin2] = itp.splev(ells, spline_Cl[Bin1,
                                                                      Bin2])
                Cov_observ[:, Bin1,
                           Bin2] = itp.splev(ells, spline_Cl_fid[Bin1, Bin2])
                if self.theoretical_error > 0:
                    Cov_error[:, Bin1,
                              Bin2] = itp.splev(ells, spline_El[Bin1, Bin2])
                if Bin2 > Bin1:
                    Cov_theory[:, Bin2, Bin1] = Cov_theory[:, Bin1, Bin2]
                    Cov_observ[:, Bin2, Bin1] = Cov_observ[:, Bin1, Bin2]
                    Cov_error[:, Bin2, Bin1] = Cov_error[:, Bin1, Bin2]

        chi2 = 0.

        # TODO parallelize this
        for index, ell in enumerate(ells):

            if self.use_lmax_lincut:
                CutBin = -1
                for zBin in xrange(self.nbin):
                    if (ell < l_lincut_mean[zBin]):
                        CutBin = zBin
                        det_theory = np.linalg.det(Cov_theory[index, CutBin:,
                                                              CutBin:])
                        det_observ = np.linalg.det(Cov_observ[index, CutBin:,
                                                              CutBin:])
                        break
                if (CutBin == -1):
                    break
            else:
                det_theory = np.linalg.det(Cov_theory[index, :, :])
                det_observ = np.linalg.det(Cov_observ[index, :, :])

            if (self.theoretical_error > 0):
                det_cross_err = 0
                for i in range(self.nbin):
                    newCov = np.copy(Cov_theory[
                        index, :, :])  #MArchi#newCov = np.copy(Cov_theory)
                    newCov[:, i] = Cov_error[
                        index, :, i]  #MArchi#newCov[:, i] = Cov_error[:, i]
                    det_cross_err += np.linalg.det(newCov)

                # Newton's method: iterate on epsilon_l, the amplitude of the
                # theoretical error, until the per-ell chi2 stops changing.
                # Find a starting point and step for the method:
                start = 0
                step = 0.001 * det_theory / det_cross_err
                error = 1
                old_chi2 = -1. * data.boundary_loglike
                error_tol = 0.01
                epsilon_l = start
                while error > error_tol:
                    vector = np.array(
                        [epsilon_l - step, epsilon_l, epsilon_l + step])
                    #print(vector.shape)
                    # Computing the function on three neighbouring points
                    function_vector = np.zeros(3, 'float64')
                    for k in range(3):
                        Cov_theory_plus_error = Cov_theory + vector[
                            k] * Cov_error
                        det_theory_plus_error = np.linalg.det(
                            Cov_theory_plus_error[index, :, :]
                        )  #MArchi#det_theory_plus_error = np.linalg.det(Cov_theory_plus_error)
                        det_theory_plus_error_cross_obs = 0
                        for i in range(self.nbin):
                            newCov = np.copy(
                                Cov_theory_plus_error[index, :, :]
                            )  #MArchi#newCov = np.copy(Cov_theory_plus_error)
                            newCov[:, i] = Cov_observ[
                                index, :,
                                i]  #MArchi#newCov[:, i] = Cov_observ[:, i]
                            det_theory_plus_error_cross_obs += np.linalg.det(
                                newCov)
                        try:
                            function_vector[k] = (
                                2. * ell + 1.) * self.fsky * (
                                    det_theory_plus_error_cross_obs /
                                    det_theory_plus_error + math.log(
                                        det_theory_plus_error / det_observ) -
                                    self.nbin) + dof * vector[k]**2
                        except ValueError:
                            warnings.warn(
                                "Euclid_lensing: Could not evaluate chi2 including theoretical error with the current parameters. The corresponding chi2 is now set to nan!"
                            )
                            chi2 = np.nan
                            break

                    # Compute the first and second derivatives by finite differences
                    first_d = (function_vector[2] -
                               function_vector[0]) / (vector[2] - vector[0])
                    second_d = (function_vector[2] + function_vector[0] -
                                2 * function_vector[1]) / (vector[2] -
                                                           vector[1])**2

                    # Updating point and error
                    epsilon_l = vector[1] - first_d / second_d
                    error = abs(function_vector[1] - old_chi2)
                    old_chi2 = function_vector[1]
                # End of Newton's method

                Cov_theory_plus_error = Cov_theory + epsilon_l * Cov_error
                det_theory_plus_error = np.linalg.det(
                    Cov_theory_plus_error[index, :, :]
                )  #MArchi#det_theory_plus_error = np.linalg.det(Cov_theory_plus_error)

                det_theory_plus_error_cross_obs = 0
                for i in range(self.nbin):
                    newCov = np.copy(
                        Cov_theory_plus_error[index, :, :]
                    )  #MArchi#newCov = np.copy(Cov_theory_plus_error)
                    newCov[:, i] = Cov_observ[
                        index, :, i]  #MArchi#newCov[:, i] = Cov_observ[:, i]
                    det_theory_plus_error_cross_obs += np.linalg.det(newCov)

                chi2 += (2. * ell + 1.) * self.fsky * (
                    det_theory_plus_error_cross_obs / det_theory_plus_error +
                    math.log(det_theory_plus_error / det_observ) -
                    self.nbin) + dof * epsilon_l**2

            else:
                if self.use_lmax_lincut:
                    det_cross = 0.
                    for i in xrange(0, self.nbin - CutBin):
                        newCov = np.copy(Cov_theory[index, CutBin:, CutBin:])
                        newCov[:, i] = Cov_observ[index, CutBin:, CutBin + i]
                        det_cross += np.linalg.det(newCov)
                else:
                    det_cross = 0.
                    for i in xrange(self.nbin):
                        newCov = np.copy(Cov_theory[index, :, :])
                        newCov[:, i] = Cov_observ[index, :, i]
                        det_cross += np.linalg.det(newCov)

                if self.use_lmax_lincut:
                    chi2 += (2. * ell + 1.) * self.fsky * (
                        det_cross / det_theory +
                        math.log(det_theory / det_observ) - self.nbin + CutBin)
                else:
                    chi2 += (2. * ell + 1.) * self.fsky * (
                        det_cross / det_theory +
                        math.log(det_theory / det_observ) - self.nbin)

        # Finally, add a Gaussian prior on the epsilon nuisance parameter, if
        # present
        if 'epsilon' in self.use_nuisance:
            epsilon = data.mcmc_parameters['epsilon']['current'] * \
                data.mcmc_parameters['epsilon']['scale']
            chi2 += epsilon**2

        #end = time.time()
        #print("time needed in s:",(end-start))

        return -chi2 / 2.
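# For reference, each term summed in the loop above follows the determinant
# form of the Gaussian likelihood,
#     chi2_l = (2l+1) * fsky * (d_mix/d_th + ln(d_th/d_obs) - nbin),
# where d_mix sums the determinants of the theory matrix with one column at a
# time replaced by the observed one. A minimal self-contained sketch of that
# term (toy 2x2 matrices, not Euclid numbers):

import numpy as np

def chi2_single_ell(ell, fsky, cov_theory, cov_observ):
    """Toy version of the per-multipole chi2 term used in the loop above."""
    nbin = cov_theory.shape[0]
    det_theory = np.linalg.det(cov_theory)
    det_observ = np.linalg.det(cov_observ)
    det_cross = 0.
    for i in range(nbin):
        new_cov = np.copy(cov_theory)
        new_cov[:, i] = cov_observ[:, i]  # swap in the observed column i
        det_cross += np.linalg.det(new_cov)
    return (2. * ell + 1.) * fsky * (
        det_cross / det_theory + np.log(det_theory / det_observ) - nbin)

# Sanity check: when theory matches observation exactly, the term vanishes.
toy_cov = np.array([[2.0, 0.3], [0.3, 1.5]])
assert abs(chi2_single_ell(10, 0.375, toy_cov, toy_cov)) < 1e-12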
Example #4
    def loglkl(self, cosmo, data):

        # First thing, recover the angular distance and Hubble factor for each
        # redshift
        H = np.zeros(2 * self.nbin + 1, 'float64')
        D_A = np.zeros(2 * self.nbin + 1, 'float64')
        r = np.zeros(2 * self.nbin + 1, 'float64')

        # H is incidentally also dz/dr
        r, H = cosmo.z_of_r(self.z)
        for i in xrange(len(D_A)):
            D_A[i] = cosmo.angular_distance(self.z[i])

        # Compute V_survey, for each given redshift bin, which is the volume of
        # a shell times the sky coverage (only fiducial needed):
        if self.fid_values_exist is False:
            V_survey = np.zeros(self.nbin, 'float64')
            for index_z in xrange(self.nbin):
                V_survey[index_z] = 4. / 3. * pi * self.fsky * (
                    r[2 * index_z + 2]**3 - r[2 * index_z]**3)

        # At the center of each bin, compute the HI bias function,
        # using formula from 1609.00019v1: b_0 + b_1*(1+z)^b_2
        if 'beta_0^IM' in self.use_nuisance:
            b_HI = (self.b_0 + self.b_1 * pow(
                1. + self.z_mean, self.b_2 * data.mcmc_parameters['beta_1^IM']
                ['current'] * data.mcmc_parameters['beta_1^IM']['scale'])
                    ) * data.mcmc_parameters['beta_0^IM'][
                        'current'] * data.mcmc_parameters['beta_0^IM']['scale']
        else:
            b_HI = self.b_0 + self.b_1 * pow(1. + self.z_mean, self.b_2)

        # At the center of each bin, compute Omega_HI
        # using formula from 1609.00019v1: Om_0*(1+z)^Om_1
        if 'Omega_HI0' in self.use_nuisance:
            Omega_HI = data.mcmc_parameters['Omega_HI0'][
                'current'] * data.mcmc_parameters['Omega_HI0']['scale'] * pow(
                    1. + self.z_mean,
                    data.mcmc_parameters['alpha_HI']['current'] *
                    data.mcmc_parameters['alpha_HI']['scale'])
        else:
            Omega_HI = self.Om_0 * pow(1. + self.z_mean, self.Om_1)

        # Compute the 21cm bias: b_21 = Delta_T_bar*b_HI in mK
        b_21 = np.zeros((self.nbin), 'float64')
        for index_z in xrange(self.nbin):
            b_21[index_z] = 189. * cosmo.Hubble(0.) * cosmo.h() / H[
                2 * index_z + 1] * (1. + self.z_mean[index_z]
                                    )**2 * b_HI[index_z] * Omega_HI[index_z]

        # Compute the frequency resolution sigma_r = (1+z)^2/H*delta_nu/sqrt(8*ln2)/nu_21cm, nu in MHz
        # Compute the angular resolution sigma_perp = (1+z)^2*D_A*lambda_21cm/diameter/sqrt(8*ln2), diameter in m
        # They combine into exp(-k^2*(mu^2*(sig_r^2-sig_perp^2)+sig_perp^2)), independent of cosmo,
        # and are used as exp(-k^2*(mu^2*sigma_A+sigma_B)) with all quantities fiducial
        if self.fid_values_exist is False:
            sigma_A = np.zeros(self.nbin, 'float64')
            sigma_B = np.zeros(self.nbin, 'float64')
            sigma_A = ((1. + self.z_mean[:])**2 / H[1::2] * self.delta_nu /
                       np.sqrt(8. * np.log(2.)) /
                       self.nu0)**2 - (1. / np.sqrt(8. * np.log(2.)) *
                                       (1 + self.z_mean[:])**2 * D_A[1::2] *
                                       2.111e-1 / self.Diameter)**2
            sigma_B = (1. / np.sqrt(8. * np.log(2.)) *
                       (1 + self.z_mean[:])**2 * D_A[1::2] * 2.111e-1 /
                       self.Diameter)**2

        # sigma_NL in Mpc = nonlinear dispersion scale of RSD (1405.1452v2)
        sigma_NL = 0.0  # the fiducial value would be 7, but keeping it fixed at 7 is more constraining than fixing it to 0
        if 'sigma_NL' in self.use_nuisance:
            sigma_NL = data.mcmc_parameters['sigma_NL'][
                'current'] * data.mcmc_parameters['sigma_NL']['scale']

        # If the fiducial model does not exist, recover the power spectrum and
        # store it, then exit.
        if self.fid_values_exist is False:
            pk = np.zeros((self.k_size, 2 * self.nbin + 1), 'float64')
            if self.use_linear_rsd:
                pk_lin = np.zeros((self.k_size, 2 * self.nbin + 1), 'float64')
            fid_file_path = os.path.join(self.data_directory,
                                         self.fiducial_file)
            with open(fid_file_path, 'w') as fid_file:
                fid_file.write('# Fiducial parameters')
                for key, value in io_mp.dictitems(data.mcmc_parameters):
                    fid_file.write(', %s = %.5g' %
                                   (key, value['current'] * value['scale']))
                fid_file.write('\n')
                for index_k in xrange(self.k_size):
                    for index_z in xrange(2 * self.nbin + 1):
                        pk[index_k,
                           index_z] = cosmo.pk_cb(self.k_fid[index_k],
                                                  self.z[index_z])
                        if self.use_linear_rsd:
                            pk_lin[index_k, index_z] = cosmo.pk_cb_lin(
                                self.k_fid[index_k], self.z[index_z])
                            fid_file.write('%.8g %.8g\n' %
                                           (pk[index_k, index_z],
                                            pk_lin[index_k, index_z]))
                        else:
                            fid_file.write('%.8g\n' % pk[index_k, index_z])
                for index_z in xrange(2 * self.nbin + 1):
                    fid_file.write('%.8g %.8g\n' % (H[index_z], D_A[index_z]))
                for index_z in xrange(self.nbin):
                    fid_file.write('%.8g\n' % V_survey[index_z])
                for index_z in xrange(self.nbin):
                    fid_file.write('%.8g\n' % b_21[index_z])
                for index_z in xrange(self.nbin):
                    fid_file.write('%.8g %.8g\n' %
                                   (sigma_A[index_z], sigma_B[index_z]))
                fid_file.write('%.8g\n' % sigma_NL)
            print('\n')
            warnings.warn(
                "Writing fiducial model in %s, for %s likelihood\n" %
                (self.data_directory + '/' + self.fiducial_file, self.name))
            return 1j

        # NOTE: Many of the following loops are hidden in a very specific numpy
        # expression, for (a more than significant) speed-up. All the following
        # loops keep the same pattern.  The colon denotes the whole range of
        # indices, so beta_fid[:,index_z] denotes the array of length
        # self.k_size at redshift z[index_z]

        # Compute the beta_fid function, for observed spectrum,
        # beta_fid(k_fid,z) = 1/2b(z) * d log(P_nl_fid(k_fid,z))/d log a
        #                   = -1/2b(z)* (1+z) d log(P_nl_fid(k_fid,z))/dz
        if self.use_linear_rsd:
            beta_fid = -0.5 / self.b_21_fid * (1 + self.z_mean) * np.log(
                self.pk_lin_fid[:, 2::2] / self.pk_lin_fid[:, :-2:2]) / self.dz
        else:
            beta_fid = -0.5 / self.b_21_fid * (1 + self.z_mean) * np.log(
                self.pk_nl_fid[:, 2::2] / self.pk_nl_fid[:, :-2:2]) / self.dz

        # Compute the tilde P_fid(k_ref,z,mu) = H_fid(z)/D_A_fid(z)**2 ( 1 + beta_fid(k_fid,z)mu^2)^2 P_nl_fid(k_fid,z)exp(-k_fid^2*(mu_fid^2*sigma_A(z)+sigma_B(z)))
        self.tilde_P_fid = np.zeros((self.k_size, self.nbin, self.mu_size),
                                    'float64')
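        # (na, defined elsewhere in this module, is presumably np.newaxis: the
        # [na, 1::2, na] slices broadcast the 1-d fiducial arrays over the
        # (k, bin, mu) grid)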
        self.tilde_P_fid = self.H_fid[na, 1::2, na] / (
            self.D_A_fid[na, 1::2, na])**2 * self.b_21_fid[na, :, na]**2 * (
                1. + beta_fid[:, :, na] * self.mu_fid[na, na, :]**2)**2 * (
                    self.pk_nl_fid[:, 1::2, na]) * np.exp(
                        -self.k_fid[:, na, na]**2 *
                        (self.mu_fid[na, na, :]**2 *
                         (self.sigma_A_fid[na, :, na] + self.sigma_NL_fid**2) +
                         self.sigma_B_fid[na, :, na]))

        ######################
        # TH PART
        ######################
        # Compute values of k based on fiducial values:
        # k^2 = ( (1-mu^2) D_A_fid(z)^2/D_A(z)^2 + mu^2 H(z)^2/H_fid(z)^2) k_fid ^ 2
        self.k = np.zeros((self.k_size, 2 * self.nbin + 1, self.mu_size),
                          'float64')
        for index_k in xrange(self.k_size):
            for index_z in xrange(2 * self.nbin + 1):
                self.k[index_k, index_z, :] = np.sqrt(
                    (1. - self.mu_fid[:]**2) * self.D_A_fid[index_z]**2 /
                    D_A[index_z]**2 + self.mu_fid[:]**2 * H[index_z]**2 /
                    self.H_fid[index_z]**2) * self.k_fid[index_k]

        # Compute values of mu based on fiducial values:
        # mu^2 = mu_fid^2 / (mu_fid^2 + ((H_fid*D_A_fid)/(H*D_A))^2)*(1 - mu_fid^2))
        self.mu = np.zeros((self.nbin, self.mu_size), 'float64')
        for index_z in xrange(self.nbin):
            self.mu[index_z, :] = np.sqrt(
                self.mu_fid[:]**2 /
                (self.mu_fid[:]**2 +
                 ((self.H_fid[2 * index_z + 1] * self.D_A_fid[2 * index_z + 1])
                  / (D_A[2 * index_z + 1] * H[2 * index_z + 1]))**2 *
                 (1. - self.mu_fid[:]**2)))

        # Recover the non-linear power spectrum from the cosmological module on all
        # the z_boundaries, to compute afterwards beta. This is pk_nl_th from the
        # notes.
        pk_nl_th = np.zeros((self.k_size, 2 * self.nbin + 1, self.mu_size),
                            'float64')
        if self.use_linear_rsd:
            pk_lin_th = np.zeros(
                (self.k_size, 2 * self.nbin + 1, self.mu_size), 'float64')

        # The next line is the bottleneck.
        # TODO: the likelihood could be sped up if this could be vectorised, either here,
        # or inside classy where there are three loops in the function get_pk
        # (maybe with a different strategy for the arguments of the function)
        pk_nl_th = cosmo.get_pk_cb(self.k, self.z, self.k_size,
                                   2 * self.nbin + 1, self.mu_size)
        if self.use_linear_rsd:
            pk_lin_th = cosmo.get_pk_cb_lin(self.k, self.z, self.k_size,
                                            2 * self.nbin + 1, self.mu_size)

        if self.UseTheoError:
            # Recover the non_linear scale computed by halofit.
            #self.k_sigma = np.zeros(2*self.nbin+1, 'float64')
            #self.k_sigma = cosmo.nonlinear_scale(self.z,2*self.nbin+1)

            # Define the theoretical error envelope
            self.alpha = np.zeros((self.k_size, self.nbin, self.mu_size),
                                  'float64')
            th_c1 = 0.75056
            th_c2 = 1.5120
            th_a1 = 0.014806
            th_a2 = 0.022047
            for index_z in xrange(self.nbin):
                k_z = cosmo.h() * pow(1. + self.z_mean[index_z], 2. /
                                      (2. + cosmo.n_s()))
                for index_mu in xrange(self.mu_size):
                    for index_k in xrange(self.k_size):
                        if self.k[index_k, 2 * index_z + 1,
                                  index_mu] / k_z < 0.3:
                            self.alpha[index_k, index_z,
                                       index_mu] = th_a1 * np.exp(
                                           th_c1 * np.log10(
                                               self.k[index_k, 2 * index_z + 1,
                                                      index_mu] / k_z))
                        else:
                            self.alpha[index_k, index_z,
                                       index_mu] = th_a2 * np.exp(
                                           th_c2 * np.log10(
                                               self.k[index_k, 2 * index_z + 1,
                                                      index_mu] / k_z))

            # Define fractional theoretical error variance R/P^2
            self.R_var = np.zeros((self.k_size, self.nbin, self.mu_size),
                                  'float64')
            for index_k in xrange(self.k_size):
                for index_z in xrange(self.nbin):
                    self.R_var[index_k, index_z, :] = self.V_fid[index_z] / (
                        2. * np.pi)**2 * self.k_CorrLength_hMpc * cosmo.h(
                        ) / self.z_CorrLength * self.dz * self.k_fid[
                            index_k]**2 * self.alpha[index_k, index_z, :]**2

        # Compute the beta function for nl,
        # beta(k,z) = 1/2b(z) * d log(P_nl_th (k,z))/d log a
        #           = -1/2b(z) *(1+z) d log(P_nl_th (k,z))/dz
        beta_th = np.zeros((self.k_size, self.nbin, self.mu_size), 'float64')
        for index_k in xrange(self.k_size):
            for index_z in xrange(self.nbin):
                if self.use_linear_rsd:
                    beta_th[index_k, index_z, :] = -1. / (
                        2. *
                        b_21[index_z]) * (1. + self.z_mean[index_z]) * np.log(
                            pk_lin_th[index_k, 2 * index_z + 2, :] /
                            pk_lin_th[index_k, 2 * index_z, :]) / (self.dz)
                else:
                    beta_th[index_k, index_z, :] = -1. / (
                        2. *
                        b_21[index_z]) * (1. + self.z_mean[index_z]) * np.log(
                            pk_nl_th[index_k, 2 * index_z + 2, :] /
                            pk_nl_th[index_k, 2 * index_z, :]) / (self.dz)

        # Compute \tilde P_th(k,mu,z) = H(z)/D_A(z)^2 * (1 + beta(z,k) mu^2)^2 exp(-k^2 mu^2 sigma_NL^2) P_nl_th(k,z) exp(-k^2 (mu^2 sigma_A + sigma_B))
        self.tilde_P_th = np.zeros((self.k_size, self.nbin, self.mu_size),
                                   'float64')
        for index_k in xrange(self.k_size):
            for index_z in xrange(self.nbin):
                self.tilde_P_th[index_k, index_z, :] = H[2 * index_z + 1] / (
                    D_A[2 * index_z + 1]**2) * b_21[index_z]**2 * (
                        1. + beta_th[index_k, index_z, :] *
                        self.mu[index_z, :] * self.mu[index_z, :])**2 * np.exp(
                            -self.k[index_k, 2 * index_z + 1, :]**2 *
                            self.mu[index_z, :]**2 * sigma_NL**2
                        ) * pk_nl_th[index_k, 2 * index_z + 1, :] * np.exp(
                            -self.k_fid[index_k]**2 *
                            (self.mu_fid[:]**2 * self.sigma_A_fid[index_z] +
                             self.sigma_B_fid[index_z]))

        # Shot noise spectrum
        self.P_shot = np.zeros((self.nbin), 'float64')
        for index_z in xrange(self.nbin):
            self.P_shot[index_z] = self.H_fid[2 * index_z + 1] / self.D_A_fid[
                2 * index_z + 1]**2 * (
                    self.T_inst +
                    20000. * pow(self.nu0 /
                                 (1. + self.z_mean[index_z]) / 408., -2.75)
                )**2 * 4. * np.pi * self.fsky * (
                    (1. + self.z_mean[index_z])**2 *
                    self.D_A_fid[2 * index_z + 1])**2 / (
                        2. * self.t_tot * 3600. * self.nu0 * 1.e+6 *
                        self.N_dish * self.H_fid[2 * index_z + 1])

        # finally compute chi2, for each z_mean
        if self.use_zscaling == 0:
            # redshift dependent cutoff makes integration more complicated
            chi2 = 0.0
            index_kmax = 0
            delta_mu = self.mu_fid[1] - self.mu_fid[0]  # equally spaced
            integrand_low = 0.0
            integrand_hi = 0.0
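            # The mu integral below is a composite trapezoidal rule: the edge
            # values (first and last mu) enter with weight 1/2 (the * .5
            # factors), interior points with full weight; for each mu the k
            # integral is trapezoidal on k_fid up to the z-dependent cutoff
            # given by self.k_cut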

            for index_z in xrange(self.nbin):
                # uncomment printers to get contributions from individual redshift bins
                #printer1 = chi2*delta_mu
                # uncomment to display max. kmin (used to infer kmin~0.02):
                #kmin: #print("z=" + str(self.z_mean[index_z]) + " kmin=" + str(34.56/r[2*index_z+1]) + "\tor " + str(6.283/(r[2*index_z+2]-r[2*index_z])))
                for index_k in xrange(1, self.k_size):
                    if ((self.k_cut(self.z_mean[index_z], cosmo.h(),
                                    cosmo.n_s()) -
                         self.k_fid[self.k_size - index_k]) > -1.e-6):
                        index_kmax = self.k_size - index_k
                        break
                integrand_low = self.integrand(0, index_z, 0) * .5
                for index_k in xrange(1, index_kmax + 1):
                    integrand_hi = self.integrand(index_k, index_z, 0) * .5
                    chi2 += (integrand_hi + integrand_low) * .5 * (
                        self.k_fid[index_k] - self.k_fid[index_k - 1])
                    integrand_low = integrand_hi
                chi2 += integrand_low * (
                    self.k_cut(self.z_mean[index_z], cosmo.h(), cosmo.n_s()) -
                    self.k_fid[index_kmax])
                for index_mu in xrange(1, self.mu_size - 1):
                    integrand_low = self.integrand(0, index_z, index_mu)
                    for index_k in xrange(1, index_kmax + 1):
                        integrand_hi = self.integrand(index_k, index_z,
                                                      index_mu)
                        chi2 += (integrand_hi + integrand_low) * .5 * (
                            self.k_fid[index_k] - self.k_fid[index_k - 1])
                        integrand_low = integrand_hi
                    chi2 += integrand_low * (self.k_cut(
                        self.z_mean[index_z], cosmo.h(), cosmo.n_s()) -
                                             self.k_fid[index_kmax])
                integrand_low = self.integrand(0, index_z,
                                               self.mu_size - 1) * .5
                for index_k in xrange(1, index_kmax + 1):
                    integrand_hi = self.integrand(index_k, index_z,
                                                  self.mu_size - 1) * .5
                    chi2 += (integrand_hi + integrand_low) * .5 * (
                        self.k_fid[index_k] - self.k_fid[index_k - 1])
                    integrand_low = integrand_hi
                chi2 += integrand_low * (
                    self.k_cut(self.z_mean[index_z], cosmo.h(), cosmo.n_s()) -
                    self.k_fid[index_kmax])
                #printer2 = chi2*delta_mu-printer1
                #print("%s\t%s" % (self.z_mean[index_z], printer2))
            chi2 *= delta_mu

        else:
            chi2 = 0.0
            mu_integrand_lo, mu_integrand_hi = 0.0, 0.0
            k_integrand = np.zeros(self.k_size, 'float64')
            for index_z in xrange(self.nbin):
                k_integrand = self.array_integrand(index_z, 0)
                mu_integrand_hi = np.sum(
                    (k_integrand[1:] + k_integrand[0:-1]) * .5 *
                    (self.k_fid[1:] - self.k_fid[:-1]))
                for index_mu in xrange(1, self.mu_size):
                    mu_integrand_lo = mu_integrand_hi
                    mu_integrand_hi = 0
                    k_integrand = self.array_integrand(index_z, index_mu)
                    mu_integrand_hi = np.sum(
                        (k_integrand[1:] + k_integrand[0:-1]) * .5 *
                        (self.k_fid[1:] - self.k_fid[:-1]))
                    chi2 += (mu_integrand_hi + mu_integrand_lo) / 2. * (
                        self.mu_fid[index_mu] - self.mu_fid[index_mu - 1])

        if 'beta_0^IM' in self.use_nuisance:
            chi2 += ((data.mcmc_parameters['beta_0^IM']['current'] *
                      data.mcmc_parameters['beta_0^IM']['scale'] - 1.) /
                     self.bias_accuracy)**2
            chi2 += ((data.mcmc_parameters['beta_1^IM']['current'] *
                      data.mcmc_parameters['beta_1^IM']['scale'] - 1.) /
                     self.bias_accuracy)**2
        return -chi2 / 2.
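# For reference, the (k, mu) remapping used above (and repeated in the next
# example) is the standard Alcock-Paczynski rescaling between the fiducial
# cosmology and the sampled one. A minimal self-contained sketch (toy numbers,
# not survey values; units only need to be consistent between the cosmologies):

import numpy as np

def ap_rescale(k_fid, mu_fid, H, H_fid, D_A, D_A_fid):
    """Map fiducial (k_fid, mu_fid) onto the (k, mu) of the sampled cosmology."""
    k = k_fid * np.sqrt((1. - mu_fid**2) * D_A_fid**2 / D_A**2
                        + mu_fid**2 * H**2 / H_fid**2)
    mu = np.sqrt(mu_fid**2 / (mu_fid**2 + ((H_fid * D_A_fid) / (H * D_A))**2
                              * (1. - mu_fid**2)))
    return k, mu

# When the sampled cosmology equals the fiducial one, the map is the identity.
k, mu = ap_rescale(0.1, 0.5, H=70., H_fid=70., D_A=1500., D_A_fid=1500.)
assert np.isclose(k, 0.1) and np.isclose(mu, 0.5)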
Example #5
    def loglkl(self, cosmo, data):
        # First thing, recover the angular distance and Hubble factor for each
        # redshift
        H = np.zeros(2 * self.nbin + 1, 'float64')
        D_A = np.zeros(2 * self.nbin + 1, 'float64')
        r = np.zeros(2 * self.nbin + 1, 'float64')

        # H is incidentally also dz/dr
        r, H = cosmo.z_of_r(self.z)
        for i in xrange(len(D_A)):
            D_A[i] = cosmo.angular_distance(self.z[i])

        # Compute sigma_r = dr(z)/dz sigma_z with sigma_z = 0.001(1+z)
        sigma_r = np.zeros(self.nbin, 'float64')
        for index_z in xrange(self.nbin):
            sigma_r[index_z] = 0.001 * (
                1. + self.z_mean[index_z]) / H[2 * index_z + 1]

        # TS; Option: nuisance sigma_NL in Mpc = nonlinear dispersion scale of RSD (1405.1452v2)
        sigma_NL = 0.0  # the fiducial value would be 7, but keeping it fixed at 7 is more constraining than fixing it to 0
        if 'sigma_NL' in self.use_nuisance:
            sigma_NL = data.mcmc_parameters['sigma_NL'][
                'current'] * data.mcmc_parameters['sigma_NL']['scale']

        # At the center of each bin, compute the bias function, simply taken
        # as sqrt(z_mean+1)
        if 'beta_0^Euclid' in self.use_nuisance:
            b = pow(
                1. + self.z_mean,
                0.5 * data.mcmc_parameters['beta_1^Euclid']['current'] *
                data.mcmc_parameters['beta_1^Euclid']['scale']
            ) * data.mcmc_parameters['beta_0^Euclid'][
                'current'] * data.mcmc_parameters['beta_0^Euclid']['scale']
        else:
            b = np.sqrt(1. + self.z_mean)

        # Compute V_survey, for each given redshift bin,
        # which is the volume of a shell times the sky coverage:
        # TS; no more need for self., now comoving, exact integral solution
        V_survey = np.zeros(self.nbin, 'float64')
        for index_z in xrange(self.nbin):
            V_survey[index_z] = 4. / 3. * pi * self.fsky * (
                r[2 * index_z + 2]**3 - r[2 * index_z]**3)

        # If the fiducial model does not exist, recover the power spectrum and
        # store it, then exit.
        if self.fid_values_exist is False:
            pk = np.zeros((self.k_size, 2 * self.nbin + 1), 'float64')
            if self.use_linear_rsd:
                pk_lin = np.zeros((self.k_size, 2 * self.nbin + 1), 'float64')
            fid_file_path = os.path.join(self.data_directory,
                                         self.fiducial_file)
            with open(fid_file_path, 'w') as fid_file:
                fid_file.write('# Fiducial parameters')
                for key, value in io_mp.dictitems(data.mcmc_parameters):
                    fid_file.write(', %s = %.5g' %
                                   (key, value['current'] * value['scale']))
                fid_file.write('\n')
                for index_k in xrange(self.k_size):
                    for index_z in xrange(2 * self.nbin + 1):
                        #### Replace pk_cb with pk if no massive neutrinos (CLASS gives error)
                        pk[index_k,
                           index_z] = cosmo.pk_cb(self.k_fid[index_k],
                                                  self.z[index_z])
                        if self.use_linear_rsd:
                            #### Replace pk_cb with pk if no massive neutrinos (CLASS gives error)
                            pk_lin[index_k, index_z] = cosmo.pk_cb_lin(
                                self.k_fid[index_k], self.z[index_z])
                            fid_file.write('%.8g %.8g\n' %
                                           (pk[index_k, index_z],
                                            pk_lin[index_k, index_z]))
                        else:
                            fid_file.write('%.8g\n' % pk[index_k, index_z])
                for index_z in xrange(2 * self.nbin + 1):
                    fid_file.write('%.8g %.8g\n' % (H[index_z], D_A[index_z]))
                for index_z in xrange(self.nbin):
                    # TS; save fiducial survey volume V_fid
                    fid_file.write(
                        '%.8g %.8g %.8g\n' %
                        (sigma_r[index_z], V_survey[index_z], b[index_z]))
                # TS; save fiducial sigma_NL
                fid_file.write('%.8g\n' % sigma_NL)
            print('\n')
            warnings.warn(
                "Writing fiducial model in %s, for %s likelihood\n" %
                (self.data_directory + '/' + self.fiducial_file, self.name))
            return 1j

        # NOTE: Many of the following loops are hidden in a very specific numpy
        # expression, for (a more than significant) speed-up. All the following
        # loops keep the same pattern.  The colon denotes the whole range of
        # indices, so beta_fid[:,index_z] denotes the array of length
        # self.k_size at redshift z[index_z]

        # Compute the beta_fid function, for observed spectrum,
        # beta_fid(k_fid,z) = 1/2b(z) * d log(P_nl_fid(k_fid,z))/d log a
        #                   = -1/2b(z)* (1+z) d log(P_nl_fid(k_fid,z))/dz

        if self.use_linear_rsd:
            beta_fid = -0.5 / self.b_fid * (1 + self.z_mean) * np.log(
                self.pk_lin_fid[:, 2::2] / self.pk_lin_fid[:, :-2:2]) / self.dz
        else:
            beta_fid = -0.5 / self.b_fid * (1 + self.z_mean) * np.log(
                self.pk_nl_fid[:, 2::2] / self.pk_nl_fid[:, :-2:2]) / self.dz

        # Compute the tilde P_fid(k_ref,z,mu) = H_fid(z)/D_A_fid(z)**2 ( 1 + beta_fid(k_fid,z)mu^2)^2 P_nl_fid(k_fid,z)exp ( -k_fid^2 mu^2 sigma_r_fid^2)
        self.tilde_P_fid = np.zeros((self.k_size, self.nbin, self.mu_size),
                                    'float64')

        self.tilde_P_fid = self.H_fid[na, 1::2, na] / (
            self.D_A_fid[na, 1::2, na])**2 * self.b_fid[na, :, na]**2 * (
                1. + beta_fid[:, :, na] * self.mu_fid[na, na, :]**2
            )**2 * (self.pk_nl_fid[:, 1::2, na]) * np.exp(
                -self.k_fid[:, na, na]**2 * self.mu_fid[na, na, :]**2 *
                (self.sigma_r_fid[na, :, na]**2 + self.sigma_NL_fid**2))

        ######################
        # TH PART
        ######################
        # Compute values of k based on k_fid (ref in paper), with the formula below (eq. 33 of the paper, which has to be corrected):
        # k^2 = ( (1-mu_fid^2) D_A_fid(z)^2/D_A(z)^2 + mu_fid^2 H(z)^2/H_fid(z)^2) k_fid ^ 2
        # So k = k (k_ref,z,mu)
        # TS; changed mu -> mu_fid
        self.k = np.zeros((self.k_size, 2 * self.nbin + 1, self.mu_size),
                          'float64')
        for index_k in xrange(self.k_size):
            for index_z in xrange(2 * self.nbin + 1):
                self.k[index_k, index_z, :] = np.sqrt(
                    (1. - self.mu_fid[:]**2) * self.D_A_fid[index_z]**2 /
                    D_A[index_z]**2 + self.mu_fid[:]**2 * H[index_z]**2 /
                    self.H_fid[index_z]**2) * self.k_fid[index_k]

        # TS; Compute values of mu based on mu_fid with
        # mu^2 = mu_fid^2 / (mu_fid^2 + ((H_fid*D_A_fid)/(H*D_A))^2)*(1 - mu_fid^2))
        self.mu = np.zeros((self.nbin, self.mu_size), 'float64')
        for index_z in xrange(self.nbin):
            self.mu[index_z, :] = np.sqrt(
                self.mu_fid[:]**2 /
                (self.mu_fid[:]**2 +
                 ((self.H_fid[2 * index_z + 1] * self.D_A_fid[2 * index_z + 1])
                  / (D_A[2 * index_z + 1] * H[2 * index_z + 1]))**2 *
                 (1. - self.mu_fid[:]**2)))

        # Recover the non-linear power spectrum from the cosmological module on all
        # the z_boundaries, to compute afterwards beta. This is pk_nl_th from the
        # notes.
        pk_nl_th = np.zeros((self.k_size, 2 * self.nbin + 1, self.mu_size),
                            'float64')
        if self.use_linear_rsd:
            pk_lin_th = np.zeros(
                (self.k_size, 2 * self.nbin + 1, self.mu_size), 'float64')

        # The next line is the bottleneck.
        # TODO: the likelihood could be sped up if this could be vectorised, either here,
        # or inside classy where there are three loops in the function get_pk
        # (maybe with a different strategy for the arguments of the function)
        #### Replace pk_cb with pk if no massive neutrinos (CLASS gives error)
        pk_nl_th = cosmo.get_pk_cb(self.k, self.z, self.k_size,
                                   2 * self.nbin + 1, self.mu_size)
        if self.use_linear_rsd:
            #### Replace pk_cb with pk if no massive neutrinos (CLASS gives error)
            pk_lin_th = cosmo.get_pk_cb_lin(self.k, self.z, self.k_size,
                                            2 * self.nbin + 1, self.mu_size)

        # Define the alpha function that will characterize the theoretical
        # uncertainty (TS; = the theoretical error envelope)
        # TS; introduced new envelope (0:optimistic 1:pessimistic for ~2023), (for old envelope see commented)
        self.alpha = np.zeros((self.k_size, self.nbin, self.mu_size),
                              'float64')
        th_c1 = 0.75056
        th_c2 = 1.5120
        th_a1 = 0.014806
        th_a2 = 0.022047
        for index_z in xrange(self.nbin):
            k_z = cosmo.h() * pow(1. + self.z_mean[index_z], 2. /
                                  (2. + cosmo.n_s()))
            for index_mu in xrange(self.mu_size):
                for index_k in xrange(self.k_size):
                    if self.k[index_k, 2 * index_z + 1, index_mu] / k_z < 0.3:
                        self.alpha[index_k, index_z,
                                   index_mu] = th_a1 * np.exp(th_c1 * np.log10(
                                       self.k[index_k, 2 * index_z + 1,
                                              index_mu] / k_z))
                    else:
                        self.alpha[index_k, index_z,
                                   index_mu] = th_a2 * np.exp(th_c2 * np.log10(
                                       self.k[index_k, 2 * index_z + 1,
                                              index_mu] / k_z))

        # TS; Define fractional theoretical error variance R/P^2
        self.R_var = np.zeros((self.k_size, self.nbin, self.mu_size),
                              'float64')
        for index_k in xrange(self.k_size):
            for index_z in xrange(self.nbin):
                self.R_var[index_k, index_z, :] = (
                    self.V_fid[index_z] / (2. * np.pi)**2 *
                    self.k_CorrLength_hMpc * cosmo.h() / self.z_CorrLength *
                    self.dz * self.k_fid[index_k]**2 *
                    self.alpha[index_k, index_z, :]**2)

        # TS; neutrino error obsolete since halofit update; corresponding lines were deleted

        # Compute the beta function for nl,
        # beta(k,z) = 1/(2 b(z)) * d ln(P_nl_th(k,z)) / d ln(a)
        #           = -(1+z)/(2 b(z)) * d ln(P_nl_th(k,z)) / dz
        beta_th = np.zeros((self.k_size, self.nbin, self.mu_size), 'float64')
        # use the linear spectrum for the RSD factor if requested
        pk_th = pk_lin_th if self.use_linear_rsd else pk_nl_th
        for index_k in xrange(self.k_size):
            for index_z in xrange(self.nbin):
                beta_th[index_k, index_z, :] = (
                    -1. / (2. * b[index_z]) * (1. + self.z_mean[index_z]) *
                    np.log(pk_th[index_k, 2 * index_z + 2, :] /
                           pk_th[index_k, 2 * index_z, :]) / self.dz)
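        # (The ratio pk_th[index_k, 2*index_z + 2, :] /
        # pk_th[index_k, 2*index_z, :] is a central difference across the two
        # boundaries of bin index_z, separated by self.dz and centred on the
        # bin mean.)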

        # Compute \tilde P_th(k,mu,z) = H(z)/D_A(z)^2 * b(z)^2
        #   * (1 + beta(k,z) mu^2)^2 * P_nl_th(k,z)
        #   * exp(-k^2 mu^2 (sigma_r^2 + sigma_NL^2))
        # TS; mu -> self.mu, added the sigma_NL contribution
        self.tilde_P_th = np.zeros((self.k_size, self.nbin, self.mu_size),
                                   'float64')
        for index_k in xrange(self.k_size):
            for index_z in xrange(self.nbin):
                self.tilde_P_th[index_k, index_z, :] = (
                    H[2 * index_z + 1] / D_A[2 * index_z + 1]**2 *
                    b[index_z]**2 *
                    (1. + beta_th[index_k, index_z, :] *
                     self.mu[index_z, :]**2)**2 *
                    pk_nl_th[index_k, 2 * index_z + 1, :] *
                    np.exp(-self.k[index_k, 2 * index_z + 1, :]**2 *
                           self.mu[index_z, :]**2 *
                           (sigma_r[index_z]**2 + sigma_NL**2)))
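        # (The Gaussian factor damps line-of-sight modes: sigma_r encodes the
        # spectroscopic redshift error and sigma_NL the non-linear velocity
        # dispersion.)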

        # Shot noise spectrum:
        # TS; removed the need to specify a nuisance parameter P_shot (it is
        # not used in the standard setup) and inserted the new self.V_fid
        self.P_shot = np.zeros((self.nbin), 'float64')
        for index_z in xrange(self.nbin):
            # Poisson contribution, plus the optional P_shot nuisance offset
            shot = self.V_fid[index_z] / self.n_g[index_z]
            if 'P_shot' in self.use_nuisance:
                shot += (data.mcmc_parameters['P_shot']['current'] *
                         data.mcmc_parameters['P_shot']['scale'])
            self.P_shot[index_z] = self.H_fid[2 * index_z + 1] / (
                self.D_A_fid[2 * index_z + 1]**2) * shot
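        # (V_fid / n_g is the Poisson shot-noise level, i.e. the inverse
        # galaxy number density, assuming n_g holds the galaxy count per bin;
        # it is mapped with the same H_fid / D_A_fid^2 factor as the signal.)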

        # finally compute chi2, summed over each z_mean
        if self.use_zscaling:
            # TS; reformulated the loops to include a z-dependent kmax, and
            # replaced mu by mu_fid. The k integral is a trapezoid up to the
            # last grid point below the cutoff k_cut(z), extended by a final
            # rectangle up to the cutoff itself; the mu integral is a
            # composite trapezoid on the uniform mu_fid grid, so the endpoint
            # slices (index_mu = 0 and mu_size - 1) carry weight 1/2 and the
            # common step delta_mu is factored out at the end
            chi2 = 0.0
            index_kmax = 0
            delta_mu = self.mu_fid[1] - self.mu_fid[0]  # equally spaced
            integrand_low = 0.0
            integrand_hi = 0.0

            for index_z in xrange(self.nbin):
                # uncomment the printer lines to show the chi2 contribution
                # from individual bins
                #printer1 = chi2*delta_mu
                # TS; uncomment to display the maximal kmin (used to infer kmin ~ 0.02):
                #print("z=" + str(self.z_mean[index_z]) + " kmin=" + str(34.56/r[2*index_z+1]) + "\tor " + str(6.283/(r[2*index_z+2]-r[2*index_z])))
                # find the index of the last k_fid point below the
                # z-dependent cutoff k_cut(z)
                for index_k in xrange(1, self.k_size):
                    if ((self.k_cut(self.z_mean[index_z], cosmo.h(),
                                    cosmo.n_s()) -
                         self.k_fid[self.k_size - index_k]) > -1.e-6):
                        index_kmax = self.k_size - index_k
                        break
                integrand_low = self.integrand(0, index_z, 0) * .5
                for index_k in xrange(1, index_kmax + 1):
                    integrand_hi = self.integrand(index_k, index_z, 0) * .5
                    chi2 += (integrand_hi + integrand_low) * .5 * (
                        self.k_fid[index_k] - self.k_fid[index_k - 1])
                    integrand_low = integrand_hi
                chi2 += integrand_low * (
                    self.k_cut(self.z_mean[index_z], cosmo.h(), cosmo.n_s()) -
                    self.k_fid[index_kmax])
                for index_mu in xrange(1, self.mu_size - 1):
                    integrand_low = self.integrand(0, index_z, index_mu)
                    for index_k in xrange(1, index_kmax + 1):
                        integrand_hi = self.integrand(index_k, index_z,
                                                      index_mu)
                        chi2 += (integrand_hi + integrand_low) * .5 * (
                            self.k_fid[index_k] - self.k_fid[index_k - 1])
                        integrand_low = integrand_hi
                    chi2 += integrand_low * (self.k_cut(
                        self.z_mean[index_z], cosmo.h(), cosmo.n_s()) -
                                             self.k_fid[index_kmax])
                integrand_low = self.integrand(0, index_z,
                                               self.mu_size - 1) * .5
                for index_k in xrange(1, index_kmax + 1):
                    integrand_hi = self.integrand(index_k, index_z,
                                                  self.mu_size - 1) * .5
                    chi2 += (integrand_hi + integrand_low) * .5 * (
                        self.k_fid[index_k] - self.k_fid[index_k - 1])
                    integrand_low = integrand_hi
                chi2 += integrand_low * (
                    self.k_cut(self.z_mean[index_z], cosmo.h(), cosmo.n_s()) -
                    self.k_fid[index_kmax])
                #printer2 = chi2*delta_mu-printer1
                #print("%s\t%s" % (self.z_mean[index_z], printer2))
            chi2 *= delta_mu

        else:
            # TS; the original code, with integrand() replaced by
            # array_integrand()
            chi2 = 0.0
            mu_integrand_lo, mu_integrand_hi = 0.0, 0.0
            for index_z in xrange(self.nbin):
                k_integrand = self.array_integrand(index_z, 0)
                mu_integrand_hi = np.sum(
                    (k_integrand[1:] + k_integrand[0:-1]) * .5 *
                    (self.k_fid[1:] - self.k_fid[:-1]))
                for index_mu in xrange(1, self.mu_size):
                    mu_integrand_lo = mu_integrand_hi
                    k_integrand = self.array_integrand(index_z, index_mu)
                    mu_integrand_hi = np.sum(
                        (k_integrand[1:] + k_integrand[0:-1]) * .5 *
                        (self.k_fid[1:] - self.k_fid[:-1]))
                    # TS; mu -> mu_fid
                    chi2 += (mu_integrand_hi + mu_integrand_lo) / 2. * (
                        self.mu_fid[index_mu] - self.mu_fid[index_mu - 1])
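            # Per redshift bin, the double trapezoid above is equivalent to
            # (a sketch, assuming array_integrand is evaluated for every mu):
            #   vals = np.array([self.array_integrand(index_z, m)
            #                    for m in xrange(self.mu_size)])
            #   chi2 += np.trapz(np.trapz(vals, self.k_fid, axis=1),
            #                    self.mu_fid)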

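        # Gaussian priors on the beta_i^Euclid nuisance parameters, centred
        # on 1 with width self.bias_accuracy (beta_0 and beta_1 are used
        # together; only beta_0 is tested below)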
        if 'beta_0^Euclid' in self.use_nuisance:
            chi2 += ((data.mcmc_parameters['beta_0^Euclid']['current'] *
                      data.mcmc_parameters['beta_0^Euclid']['scale'] - 1.) /
                     self.bias_accuracy)**2
            chi2 += ((data.mcmc_parameters['beta_1^Euclid']['current'] *
                      data.mcmc_parameters['beta_1^Euclid']['scale'] - 1.) /
                     self.bias_accuracy)**2

        # return the log-likelihood, ln(L) = -chi2 / 2
        return -chi2 / 2.