Example #1
import itertools

import numpy as np
from classy import Class


def m_Pk(k=np.logspace(-3, 0., 100), z=0.53, nl_model='trg'):
    print(k)
    cosmo = Class()

    CLASS_INPUT = {}

    CLASS_INPUT['Mnu'] = ([{'N_eff': 0.0, 'N_ncdm': 1, 'm_ncdm': 0.06, 'deg_ncdm': 3.0}], 'normal')
    CLASS_INPUT['Output_spectra'] = ([{'output': 'mPk', 'P_k_max_1/Mpc': 1, 'z_pk': z}], 'power')

    CLASS_INPUT['Nonlinear'] = ([{'non linear': nl_model}], 'power')
            
    verbose = {}
    #    'input_verbose': 1,
    #    'background_verbose': 1,
    #    'thermodynamics_verbose': 1,
    #    'perturbations_verbose': 1,
    #    'transfer_verbose': 1,
    #    'primordial_verbose': 1,
    #    'spectra_verbose': 1,
    #    'nonlinear_verbose': 1,
    #    'lensing_verbose': 1,
    #    'output_verbose': 1
    #    }

    cosmo.struct_cleanup()
    cosmo.empty()


    INPUTPOWER = []
    INPUTNORMAL = [{}]
    for key, value in CLASS_INPUT.items():
        models, state = value
        if state == 'power':
            INPUTPOWER.append([{}]+models)
        else:
            INPUTNORMAL.extend(models)

    PRODPOWER = list(itertools.product(*INPUTPOWER))

    DICTARRAY = []
    for normelem in INPUTNORMAL:
        for powelem in PRODPOWER:  # itertools.product(*modpower):
            temp_dict = normelem.copy()
            for elem in powelem:
                temp_dict.update(elem)
            DICTARRAY.append(temp_dict)

    scenario = {}
    for dic in DICTARRAY:
        scenario.update(dic)
    setting = cosmo.set({**verbose, **scenario})
    cosmo.compute()
    pk_out = [] 
    for k_i in k: 
        pk_out.append(cosmo.pk(k_i,z))
    return pk_out 
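
A minimal usage sketch for m_Pk above (hedged: it assumes classy is installed; the 'trg' non-linear method only exists in older CLASS releases, so 'halofit' is requested here instead):

import numpy as np

k_grid = np.logspace(-3, 0., 50)                  # wavenumbers in 1/Mpc
pk = m_Pk(k=k_grid, z=0.53, nl_model='halofit')   # list of P(k) values in Mpc^3
print(pk[:5])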
Example #2
    def setup(self):
        """
        Create an instance of Class and attach it to self.
        """
        self.cosmo = Class()
        self.cosmo.set(self.constants)
        self.cosmo.compute()
        self.cosmo.struct_cleanup()
     
Example #3
    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()
Example #4
    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()
        self.cosmo_newt = Class()

        self.verbose = {
            'input_verbose': 1,
            'background_verbose': 1,
            'thermodynamics_verbose': 1,
            'perturbations_verbose': 1,
            'transfer_verbose': 1,
            'primordial_verbose': 1,
            'spectra_verbose': 1,
            'nonlinear_verbose': 1,
            'lensing_verbose': 1,
            'output_verbose': 1}
        self.scenario = {}
Example #5
    def __init__(self, cosmo=None):
        """
        Initialize the Model class. By default Model uses its own Class
        instance.

        cosmo = external Class instance. Default is None
        """
        if cosmo:
            self.cosmo = cosmo
        else:
            self.cosmo = Class()
        self.computed = {}
        self.texnames = {}
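
A short sketch of the two ways this constructor can be used (hedged; only the classy Class object shown above is assumed):

from classy import Class

model_own = Model()                    # Model creates and owns its Class instance
shared_cosmo = Class()
model_ext = Model(cosmo=shared_cosmo)  # or reuse an externally managed instance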
Example #6
    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()

        self.verbose = {
            "input_verbose": 1,
            "background_verbose": 1,
            "thermodynamics_verbose": 1,
            "perturbations_verbose": 1,
            "transfer_verbose": 1,
            "primordial_verbose": 1,
            "spectra_verbose": 1,
            "nonlinear_verbose": 1,
            "lensing_verbose": 1,
            "output_verbose": 1,
        }
        self.scenario = {"lensing": "yes"}
Example #7
class ClassCoreModule(object):
    
    def __init__(self, mapping=DEFAULT_PARAM_MAPPING, constants=CLASS_DEFAULT_PARAMS):
        """
        Core Module for the delegation of the computation of the cmb power
        spectrum to the Class wrapper classy.
        The defaults are for the 6 LambdaCDM cosmological parameters.
        
        :param mapping: (optional) dict mapping name of the parameter to the index
        :param constants: (optional) dict with constants overwriting CLASS defaults
        """
        self.mapping = mapping
        if constants is None:
            constants = {}
        self.constants = constants
        
    def __call__(self, ctx):
        p1 = ctx.getParams()
        
        params = self.constants.copy()
        for k,v in self.mapping.items():
            params[k] = p1[v]
        self.cosmo.set(params)
        self.cosmo.compute()
        if self.constants['lensing'] == 'yes':
            cls = self.cosmo.lensed_cl()
        else:
            cls = self.cosmo.raw_cl()
        Tcmb = self.cosmo.T_cmb()*1e6
        frac = Tcmb**2 * cls['ell'][2:] * (cls['ell'][2:] + 1) / 2. / pi
        ctx.add(CL_TT_KEY, frac*cls['tt'][2:])
        ctx.add(CL_TE_KEY, frac*cls['te'][2:])
        ctx.add(CL_EE_KEY, frac*cls['ee'][2:])
        ctx.add(CL_BB_KEY, frac*cls['bb'][2:])
        self.cosmo.struct_cleanup()

    def setup(self):
        """
        Create an instance of Class and attach it to self.
        """
        self.cosmo = Class()
        self.cosmo.set(self.constants)
        self.cosmo.compute()
        self.cosmo.struct_cleanup()
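
For orientation, a minimal sketch of the bare classy calls this core module delegates to (a standalone illustration with placeholder parameters, not the CosmoHammer pipeline itself):

from math import pi
from classy import Class

cosmo = Class()
cosmo.set({'output': 'tCl pCl lCl', 'lensing': 'yes',
           'omega_b': 0.022, 'omega_cdm': 0.12})   # placeholder cosmology
cosmo.compute()
cls = cosmo.lensed_cl()
ell = cls['ell'][2:]
frac = (cosmo.T_cmb() * 1e6)**2 * ell * (ell + 1) / 2. / pi   # D_ell prefactor in muK^2
dl_tt = frac * cls['tt'][2:]
cosmo.struct_cleanup()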
        
Example #8
    def loglkl(self, params):
        cosmo = Class()
        cosmo.set(params)
        cosmo.compute()

        chi2 = 0.

        # for each point, compute angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # theoretical prediction and chi2 contribution
        for i in range(self.num_points):

            da = cosmo.angular_distance(self.z[i])
            dr = self.z[i] / cosmo.Hubble(self.z[i])
            dv = pow(da * da * (1 + self.z[i]) * (1 + self.z[i]) * dr, 1. / 3.)
            rs = cosmo.rs_drag()

            if self.type[i] == 3:
                theo = dv / rs

            elif self.type[i] == 4:
                theo = dv

            elif self.type[i] == 5:
                theo = da / rs

            elif self.type[i] == 6:
                theo = 1. / cosmo.Hubble(self.z[i]) / rs

            elif self.type[i] == 7:
                theo = rs / dv

            chi2 += ((theo - self.data[i]) / self.error[i]) ** 2

        # return ln(L)
        # lkl = - 0.5 * chi2
        # return -2ln(L)
        lkl = chi2

        return lkl
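
The distance combinations above are the standard BAO observables; a hedged standalone sketch of the volume distance D_V(z) = [(1+z)^2 D_A(z)^2 z/H(z)]^(1/3) using only documented classy calls (the cosmology below is a placeholder):

from classy import Class

cosmo = Class()
cosmo.set({'omega_b': 0.022, 'omega_cdm': 0.12, 'h': 0.67})
cosmo.compute()

z = 0.35
da = cosmo.angular_distance(z)             # angular diameter distance [Mpc]
dr = z / cosmo.Hubble(z)                   # radial distance [Mpc]
dv = (da * da * (1 + z)**2 * dr)**(1. / 3.)
print(dv / cosmo.rs_drag())                # D_V / r_s, the 'type 3' observable above
cosmo.struct_cleanup()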
Example #9
def ComputeTransferData(settings, redshift):
    database_key = settings.copy()
    database_key.update({'redshift': tuple(redshift)})

    database = Database.Database(config.DATABASE_DIR)
    if database_key in database:
        return database[database_key], redshift
    else:
        cosmo = Class()
        cosmo.set(settings)
        cosmo.compute()

        outputData = [cosmo.get_transfer(z) for z in redshift]
        # Calculate d_g/4+psi
        for transfer_function_dict in outputData:
            transfer_function_dict["d_g/4 + psi"] = transfer_function_dict["d_g"]/4 + transfer_function_dict["psi"]
        # Now filter the relevant fields
        fields = TRANSFER_QUANTITIES + ["k (h/Mpc)"]
        outputData = [{field: outputData[i][field] for field in fields} for i in range(len(redshift))]

        database[database_key] = outputData
        return outputData, redshift
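
A hedged sketch of the underlying classy call without the project-specific caching layer (Database, config and TRANSFER_QUANTITIES are omitted; the settings below are placeholders):

from classy import Class

cosmo = Class()
cosmo.set({'output': 'dTk', 'gauge': 'newtonian', 'z_pk': '0, 1'})
cosmo.compute()
tk = cosmo.get_transfer(0.0)              # dict of arrays, e.g. 'k (h/Mpc)', 'd_g', 'psi'
tk['d_g/4 + psi'] = tk['d_g'] / 4 + tk['psi']
cosmo.struct_cleanup()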
Example #10
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    #{cosmoslik name : class name} - This needs to be done even for variables with the same name (because of for loop in self.model.set)!
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'k_c':'k_c',
                    'alpha_exp':'alpha_exp',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    #'Tcmb':'T_cmb',
                    #'P_k_max_hinvMpc':'P_k_max_h/Mpc'
                    #'w':'w0_fld',
                    #'nrun':'alpha_s',
                    #'omk':'Omega_k',
                    #'l_max_scalar':'l_max_scalars',
                    #'l_max_tensor':'l_max_tensors'
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 k_c,
                 alpha_exp,
                 tau,
                 #omnuh2=0, #0.006  #None means that Class will take the default for this, maybe?
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 #massive_neutrinos=0,
                 massless_neutrinos=3.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.05,
                 outputs=[],
                 **kwargs):


        
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       **{self.name_mapping[k]:v for k,v in locals().items() 
                          if k in self.name_mapping and v is not None})
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #11
class tsz_gal_cl:
    def __init__(self):
        # print 'Class for tSZ Cl'
        # self.ptilde = np.loadtxt(LIBDIR+'/aux_files/ptilde.txt')
        self.fort_lib_cl = cdll.LoadLibrary(LIBDIR+"/source/calc_cl")

        self.fort_lib_cl.calc_cl_.argtypes = [
                                    POINTER(c_double), #h0
                                    POINTER(c_double), #obh2
                                    POINTER(c_double), #och2
                                    POINTER(c_double), #mnu
                                    POINTER(c_double), #bias
                                    POINTER(c_double), #Mcut
                                    POINTER(c_double), #M1
                                    POINTER(c_double), #kappa
                                    POINTER(c_double), #sigma_Ncen
                                    POINTER(c_double), #alp_Nsat
                                    POINTER(c_double), #rmax
                                    POINTER(c_double), #rgs
                                    POINTER(c_int64), #pk_nk
                                    POINTER(c_int64), #pk_nz
                                    np.ctypeslib.ndpointer(dtype=np.double), #karr
                                    np.ctypeslib.ndpointer(dtype=np.double), #pkarr
                                    np.ctypeslib.ndpointer(dtype=np.double), #dndz
                                    POINTER(c_int64), #nz_dndz
                                    POINTER(c_double), #z1
                                    POINTER(c_double), #z2
                                    POINTER(c_double), #z1_ng
                                    POINTER(c_double), #z2_ng
                                    POINTER(c_int64), #nl
                                    np.ctypeslib.ndpointer(dtype=np.double), #ell
                                    np.ctypeslib.ndpointer(dtype=np.double), #gg
                                    np.ctypeslib.ndpointer(dtype=np.double), #gy
                                    np.ctypeslib.ndpointer(dtype=np.double), #tll
                                    POINTER(c_double), #ng(z1<z<z2)
                                    POINTER(c_int64), #flag_nu
                                    POINTER(c_int64), #flag_tll
                                    POINTER(c_int64), #nm
                                    POINTER(c_int64) #nz
                                    ]
        self.fort_lib_cl.calc_cl_.restype = c_void_p

        # Calculation setup
        self.kmin = 1e-3
        self.kmax = 100.
        self.zmax = 4. # should be consistent with fortran code
        self.nk_pk = 500
        self.nz_pk = 51

        # Class
        self.cosmo = Class()

    def get_tsz_cl(self,ell_arr,params,dndz,z1,z2,z1_ng,z2_ng,nm,nz):
        self.zmin = z1
        self.zmax = z2
        obh2 = params['obh2']
        och2 = params['och2']
        As = params['As']
        ns = params['ns']
        mnu = params['mnu']
        mass_bias = params['mass_bias']
        Mcut = params['Mcut']
        M1 = params['M1']
        kappa = params['kappa']
        sigma_Ncen = params['sigma_Ncen']
        alp_Nsat = params['alp_Nsat']
        rmax = params['rmax']
        rgs = params['rgs']
        flag_nu_logic = params['flag_nu']
        flag_tll_logic = params['flag_tll']
        if type(flag_nu_logic) != bool:
            print('flag_nu must be boolean.')
            sys.exit()
        if flag_nu_logic:
            flag_nu = 1
        else:
            flag_nu = 0
        if type(flag_tll_logic) != bool:
            print('flag_tll must be boolean.')
            sys.exit()
        if flag_tll_logic:
            flag_tll = 1
        else:
            flag_tll = 0
        
        if 'theta' in params.keys():
            theta = params['theta']
            pars = {'output':'mPk','100*theta_s':theta,
                    'omega_b':obh2,'omega_cdm':och2,
                    'A_s':As,'n_s':ns,\
                    'N_ur':0.00641,'N_ncdm':1,'m_ncdm':mnu/3.,\
                    'T_ncdm':0.71611,\
                    'P_k_max_h/Mpc': self.kmax,'z_max_pk':self.zmax,\
                    'deg_ncdm':3.}
            self.cosmo.set(pars)
            self.cosmo.compute()
            h0 = self.cosmo.h()
        elif 'h0' in params.keys():
            h0 = params['h0']
            pars = {'output':'mPk','h':h0,
                    'omega_b':obh2,'omega_cdm':och2,
                    'A_s':As,'n_s':ns,\
                    'N_ur':0.00641,'N_ncdm':1,'m_ncdm':mnu/3.,\
                    'T_ncdm':0.71611,\
                    'P_k_max_h/Mpc': self.kmax,'z_max_pk':self.zmax,\
                    'deg_ncdm':3.}
            print(pars)
            self.cosmo.set(pars)
            self.cosmo.compute()
        derived = self.cosmo.get_current_derived_parameters(['100*theta_s','sigma8'])
        vz = (self.cosmo.angular_distance(z2_ng)**3*(1+z2_ng)**3 \
              -self.cosmo.angular_distance(z1_ng)**3*(1+z1_ng)**3)
        vz = vz*h0**3*4.*np.pi/3.
        derived['vz'] = vz

    
        # get matter power spectra
        kh_arr = np.logspace(np.log10(self.kmin),np.log10(self.kmax),self.nk_pk)
        kh = np.zeros((self.nz_pk,self.nk_pk))
        pk = np.zeros((self.nz_pk,self.nk_pk))
        pk_zarr = np.linspace(self.zmin,self.zmax,self.nz_pk)
        for i in range(self.nz_pk):
            kh[i,:] = kh_arr
            if flag_nu == 0:
                pk[i,:] = np.array([self.cosmo.pk(k*h0,pk_zarr[i])*h0**3 for k in kh_arr])
            elif flag_nu == 1:
                pk[i,:] = np.array([self.cosmo.pk_cb(k*h0,pk_zarr[i])*h0**3 for k in kh_arr])

        # params
        h0_in = byref(c_double(h0))
        obh2_in = byref(c_double(obh2))
        och2_in = byref(c_double(och2))
        mnu_in = byref(c_double(mnu))
        mass_bias_in = byref(c_double(mass_bias))
        Mcut_in = byref(c_double(Mcut))
        M1_in = byref(c_double(M1))
        kappa_in = byref(c_double(kappa))
        sigma_Ncen_in = byref(c_double(sigma_Ncen))
        alp_Nsat_in = byref(c_double(alp_Nsat))
        rmax_in = byref(c_double(rmax))
        rgs_in = byref(c_double(rgs))
        flag_nu_in = byref(c_int64(flag_nu))
        flag_tll_in = byref(c_int64(flag_tll))

        # dNdz
        nz_dndz = byref(c_int64(len(dndz)))

        # integration setting
        z1_in = byref(c_double(self.zmin)) 
        z2_in = byref(c_double(self.zmax)) 
        
        # outputs
        nl = len(ell_arr)
        cl_gg = np.zeros((2,nl))
        cl_gy = np.zeros((2,nl))
        tll = np.zeros((nl*2,nl*2))
        ng = c_double(0.0)
        nl = c_int64(nl)
   
        self.fort_lib_cl.calc_cl_(
                h0_in, obh2_in, och2_in, mnu_in,\
                mass_bias_in, \
                Mcut_in, M1_in, kappa_in, sigma_Ncen_in, alp_Nsat_in,\
                rmax_in, rgs_in,\
                byref(c_int64(self.nk_pk)), byref(c_int64(self.nz_pk)),\
                np.array(kh),np.array(pk),\
                np.array(dndz),nz_dndz,\
                z1_in, z2_in,\
                byref(c_double(z1_ng)),byref(c_double(z2_ng)),\
                nl,np.array(ell_arr),\
                cl_gg,cl_gy,tll,ng,\
                flag_nu_in,flag_tll_in,\
                c_int64(nm), c_int64(nz)
                )

        self.cosmo.struct_cleanup()
        # k_h h/Mpc = k / Mpc
        # P_h Mpc^3/h^3 = P Mpc^3
        return cl_gg, cl_gy, tll, ng.value, derived, kh_arr*h0, pk_zarr, pk/h0**3
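
A hedged reminder of the unit bookkeeping noted in the comment above: classy's pk takes k in 1/Mpc and returns P(k) in Mpc^3, so factors of h convert to and from h-units.

from classy import Class

cosmo = Class()
cosmo.set({'output': 'mPk', 'P_k_max_h/Mpc': 10.})   # placeholder settings
cosmo.compute()
h0 = cosmo.h()
k_h = 0.1                                  # wavenumber in h/Mpc
p_h = cosmo.pk(k_h * h0, 0.) * h0**3       # P(k) in (Mpc/h)^3
cosmo.struct_cleanup()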
Example #12
    primordial_spectrum['n_s'],
    'alpha_s':
    primordial_spectrum['α_s'],
    'k_pivot':
    primordial_spectrum['pivot'] / units.Mpc**(-1),
    'output':
    'mPk',
    'z_pk':
    str(z),
    'k_output_values':
    '{}, {}'.format(
        min(k_values) / units.Mpc**(-1),
        max(k_values) / units.Mpc**(-1),
    ),
}
cosmo = Class()
cosmo.set(class_params_specialized)
cosmo.compute()
power_class = asarray([cosmo.pk(k / units.Mpc**(-1), z)
                       for k in k_values]) * units.Mpc**3
plt.loglog(k_values, power_class, 'k--', label='CLASS')
plt.xlabel(rf'$k\, [\mathrm{{{unit_length}}}^{{-1}}]$')
plt.ylabel(rf'matter power $\mathrm{{[{unit_length}^3]}}$')
plt.legend(loc='best').get_frame().set_alpha(0.7)
plt.tight_layout()
plt.savefig(fig_file)

# Compare the power spectra of the realisations with
# the power spectrum from CLASS.
# Ignore the power at the largest scales due to low
# mode count. For particles, further ignore the power at
Example #13
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk,
    etc.).

    """

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()

        self.verbose = {
            "input_verbose": 1,
            "background_verbose": 1,
            "thermodynamics_verbose": 1,
            "perturbations_verbose": 1,
            "transfer_verbose": 1,
            "primordial_verbose": 1,
            "spectra_verbose": 1,
            "nonlinear_verbose": 1,
            "lensing_verbose": 1,
            "output_verbose": 1,
        }
        self.scenario = {"lensing": "yes"}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        del self.scenario

    @parameterized.expand(
        itertools.product(
            ("LCDM", "Mnu", "Positive_Omega_k", "Negative_Omega_k", "Isocurvature_modes"),
            (
                {"output": ""},
                {"output": "mPk"},
                {"output": "tCl"},
                {"output": "tCl pCl lCl"},
                {"output": "mPk tCl lCl", "P_k_max_h/Mpc": 10},
                {"output": "nCl sCl"},
                {"output": "tCl pCl lCl nCl sCl"},
            ),
            ({"gauge": "newtonian"}, {"gauge": "sync"}),
            ({}, {"non linear": "halofit"}),
        )
    )
    def test_wrapper_implementation(self, name, scenario, gauge, nonlinear):
        """Create a few instances based on different cosmologies"""
        if name == "Mnu":
            self.scenario.update({"N_ncdm": 1, "m_ncdm": 0.06})
        elif name == "Positive_Omega_k":
            self.scenario.update({"Omega_k": 0.01})
        elif name == "Negative_Omega_k":
            self.scenario.update({"Omega_k": -0.01})
        elif name == "Isocurvature_modes":
            self.scenario.update({"ic": "ad,nid,cdi", "c_ad_cdi": -0.5})

        self.scenario.update(scenario)
        if scenario != {}:
            self.scenario.update(gauge)
        self.scenario.update(nonlinear)

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test case %s |\n" % name)
        sys.stderr.write("---------------------------------\n")
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set({**self.verbose, **self.scenario})
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_list = ["tCl", "lCl", "pCl", "nCl", "sCl"]

        # Depending on the cases, the compute should fail or not
        should_fail = True
        output = self.scenario["output"].split()
        for elem in output:
            if elem in ["tCl", "pCl"]:
                for elem2 in output:
                    if elem2 == "lCl":
                        should_fail = False
                        break

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(self.cosmo.state, "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print "--> Class is ready"
        # Depending
        if "output" in self.scenario.keys():
            # Positive tests
            output = self.scenario["output"]
            for elem in output.split():
                if elem in cl_list:
                    print "--> testing raw_cl function"
                    cl = self.cosmo.raw_cl(100)
                    self.assertIsNotNone(cl, "raw_cl returned nothing")
                    self.assertEqual(np.shape(cl["tt"])[0], 101, "raw_cl returned wrong size")
                if elem == "mPk":
                    print "--> testing pk function"
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_list for elem in output.split()]):
                print "--> testing absence of any Cl"
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if "mPk" not in self.scenario["output"].split():
                print "--> testing absence of mPk"
                # args = (0.1, 0)
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

    @parameterized.expand(
        itertools.product(("massless", "massive", "both"), ("photons", "massless", "exact"), ("t", "s, t"))
    )
    def test_tensors(self, scenario, method, modes):
        """Test the new tensor mode implementation"""
        self.scenario = {}
        if scenario == "massless":
            self.scenario.update({"N_eff": 3.046, "N_ncdm": 0})
        elif scenario == "massiv":
            self.scenario.update({"N_eff": 0, "N_ncdm": 2, "m_ncdm": "0.03, 0.04", "deg_ncdm": "2, 1"})
        elif scenario == "both":
            self.scenario.update({"N_eff": 1.5, "N_ncdm": 2, "m_ncdm": "0.03, 0.04", "deg_ncdm": "1, 0.5"})

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test case: %s %s %s |\n" % (scenario, method, modes))
        sys.stderr.write("---------------------------------\n")
        self.scenario.update({"tensor method": method, "modes": modes, "output": "tCl, pCl"})
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")
        self.cosmo.set({**self.verbose, **self.scenario})
        self.cosmo.compute()

    @parameterized.expand(zip(powerset(["100*theta_s", "Omega_dcdmdr"]), powerset([1.04, 0.20])))
    def test_shooting_method(self, variables, values):
        Omega_cdm = 0.25

        scenario = {"Omega_b": 0.05}

        for variable, value in zip(variables, values):
            scenario.update({variable: value})

        if "Omega_dcdmdr" in variables:
            scenario.update({"Gamma_dcdm": 100, "Omega_cdm": Omega_cdm - scenario["Omega_dcdmdr"]})
        else:
            scenario.update({"Omega_cdm": Omega_cdm})

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test shooting: %s |\n" % (", ".join(variables)))
        sys.stderr.write("---------------------------------\n")
        for key, value in scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        scenario.update(self.verbose)
        self.assertTrue(self.cosmo.set(scenario), "Class failed to initialise with this input")
        self.cosmo.compute()

        # Now, check that the values are properly extracted
        for variable, value in zip(variables, values):
            if variable == "100*theta_s":
                computed_value = self.cosmo.get_current_derived_parameters([variable])[variable]
                self.assertAlmostEqual(value, computed_value, places=5)
Example #14
h = csm['h']
#zvec = np.array([0.0,0.5,1.0,2.0])
Pnl = e2py.get_pnonlin(csm,0.5)
kvec = Pnl['k']
kshape = kvec.shape

ClassyPars = e2py.emu_to_class(csm)
ClassyPars['Omega_Lambda']=0.0
ClassyPars['output']='mPk'
ClassyPars['non linear']='Halofit'
ClassyPars['format']='camb'
ClassyPars['P_k_max_h/Mpc']=10.
ClassyPars['k_per_decade_for_pk']=300.
ClassyPars['z_pk']=0.5#'0.0','0.5','1.0','2.0' 

cosmo=Class()
cosmo.set(ClassyPars)
cosmo.compute()

pHF = np.array([cosmo.pk(k*h,0.5)*h*h*h for k in kvec]).reshape(kshape)

Fig, axs = plt.subplots(2,1, sharex=True)
ax = axs[0]
ax.loglog(kvec, Pnl['P_lin'], c='gray', label=r"$P_{\rm lin}^{\rm CLASS}$")
ax.loglog(kvec, Pnl['P_nonlin'], c='blue', label=r"$P_{\rm nl}^{\rm EE} = P_{\rm lin}^{\rm CLASS} \times B$")
ax.grid(True)
ax.set_ylabel(r"$P(k, z=0.5)$ [$(\mathrm{Mpc}/h)^3$]")
ax.set_xlim([0.01,5])

ax = axs[1]
ax.axhline(y=0, c="black", ls=":")
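
The snippet breaks off before the lower panel is filled; a plausible, hedged continuation plots the fractional difference between the halofit and emulator spectra:

ax.semilogx(kvec, pHF / Pnl['P_nonlin'] - 1., c='red', label='Halofit / EE - 1')
ax.set_xlabel(r"$k$ [$h/\mathrm{Mpc}$]")
ax.set_ylabel("relative difference")
ax.legend()
plt.show()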
Example #15
class Model():
    def __init__(self, cosmo=None):
        """
        Initialize the Model class. By default Model uses its own Class
        instance.

        cosmo = external Class instance. Default is None
        """
        if cosmo:
            self.cosmo = cosmo
        else:
            self.cosmo = Class()
        self.computed = {}
        self.texnames = {}

    def __set_scale(self, axes, xscale, yscale):
        """
        Set scales for axes in axes array.

        axes = axes array (e.g. f, ax = plt.subplots(2,2))
        xscale = linear array of xscale.
        yscale = linear array of yscale.

        Scales are set once axes is flattened. Each plot is counted from left
        to right and from top to bottom.
        """
        for i, ax in enumerate(axes.flat):
            ax.set_xscale(xscale[i])
            ax.set_yscale(yscale[i])

    def __set_label(self, axes, xlabel, ylabel):
        """
        Set labels for axes in axes array.

        axes = axes array (e.g. f, ax = plt.subplots(2,2))
        xlabel = linear array of xlabels.
        ylabel = linear array of ylabels.

        Labels are set once axes is flattened. Each plot is counted from left
        to right and from top to bottom.
        """
        for i, ax in enumerate(axes.flat):
            ax.set_xlabel(xlabel[i])
            ax.set_ylabel(ylabel[i])

    def __store_cl(self, cl_dic):
        """
        Store cl's as (l*(l+1)/2pi)*cl, which is much more useful.
        """

        ell = cl_dic['ell'][2:]

        for cl, list_val in cl_dic.items():
            list_val = list_val[2:]
            if (list_val == ell).all():
                cl_dic[cl] = list_val
                continue
            list_val = (ell * (ell + 1) / (2 * np.pi)) * list_val
            cl_dic[cl] = list_val  # Remove first two null items (l=0,1)

        return cl_dic

    def add_derived(self, varied_name, keys, value):
        """
        Add a derived parameter for varied_name dictionary.

        varied_name = varied variable's name.
        keys = list of keys in descending level.
        value = value to store for new dictionary key.
        """

        dic = self.computed[varied_name]

        for key in keys:
            if key not in dic:
                dic[key] = {}

            dic = dic[key]

        dic.update(value)

    def compute_models(self, params, varied_name, index_variable, values,
                       back=[], thermo=[], prim=[], pert=[], trans=[],
                       pk=[0.0001, 0.1, 100], extra=[], update=True,
                       cosmo_msg=False, texname=""):
        """
        Fill dic with the hi_class output structures for the model with given
        params, modifying the varied_name value with values.

        params = parameters to be set in Class. They must be in agreement with
                what is asked for.
        varied_name = the name of the variable you are modifying. It will be
                      used as key in dic assigned to its background structures.
        index_variable = variable's index in parameters_smg array.
        values = varied variable values you want to compute the cosmology for.
        back = list of variables to store from background. If 'all', store the
              whole dictionary.
        thermo = list of variables to store from thermodynamics. If 'all',
                  store the whole dictionary.
        prim = list of variables to store from primordial. If 'all', store the
               whole dictionary.
        pert = list of variables to store from perturbations. If 'all', store
               the whole dictionary.
        trans = list of variables to store from transfer. If 'all', store
                the whole dictionary. get_transfer accepts two optional
                arguments: z=0 and output_format='class' (available options are
                'class' or 'camb'). If different values are desired, the first
                item of trans must be {'z': value, 'output_format': value}.
        pk = list with the minimum and maximum k values to store the present
             matter power spectrum and the number of points [k_min, k_max,
             number_points]. Default [10^-4, 10^1, 100].
        extra = list of any of the method or objects defined in cosmo, e.g.
                w0_smg().  It will store {'method': cosmo.w0_smg()}
        update = if True, update the old computed[key] dictionary; otherwise
                 create a new one. Default: True.
        cosmo_msg = if True, print cosmo.compute() messages. Default: False.
        """

        key = varied_name

        if texname:
            self.set_texnames({varied_name: texname})
        elif key not in self.texnames:  # texname will not be set at this stage. No check required
            self.set_texnames({varied_name: varied_name})

        if (not update) or (key not in self.computed.keys()):
            self.computed[key] = od()

        for val in values:
            # key = "{}={}".format(varied_name, val)
            params["parameters_smg"] = inip.vary_params(params["parameters_smg"], [[index_variable, val]])

            # This could go after the try block so that empty dictionaries are
            # not stored. Nevertheless, it is more useful to keep them to track
            # the failed models and, perhaps, to implement a method to obtain
            # them with Omega_smg_debug.
            d = self.computed[key][val] = {}

            self.cosmo.empty()
            self.cosmo.set(params)

            try:
                self.cosmo.compute()
            except Exception as e:
                print("Error: skipping {}={}".format(key, val))
                if cosmo_msg:
                    print(e)

                continue

            d['tunned'] = self.cosmo.get_current_derived_parameters(['tuning_parameter'])['tuning_parameter']

            for lst in [[back, 'back', self.cosmo.get_background],
                        [thermo, 'thermo', self.cosmo.get_thermodynamics],
                        [prim, 'prim', self.cosmo.get_primordial]]:
                if lst[0]:
                    output = lst[2]()
                    if lst[0][0] == 'all':
                        d[lst[1]] = output
                    else:
                        d[lst[1]] = {}
                        for item in lst[0]:
                            if type(item) is list:
                                d[lst[1]].update({item[0]: output[item[0]][item[1]]})
                            else:
                                d[lst[1]].update({item: output[item]})

            if pert:
                # Perturbation is tricky because it can accept two optional
                # argument for get_perturbations and this method returns a
                # dictionary {'kind_of_pert': [{variable: list_values}]}, where
                # each item in the list is for a k (chosen in params).
                if type(pert[0]) is dict:
                    output = self.cosmo.get_perturbations(pert[0]['z'], pert[0]['output_format'])
                    if pert[1] == 'all':
                        d['pert'] = output
                else:
                    output = self.cosmo.get_perturbations()
                    if pert[0] == 'all':
                        d['pert'] = output

                if (type(pert[0]) is not dict) and (pert[0] != 'all'):
                    d['pert'] = {}
                    for subkey, lst in output.items():
                        d['pert'].update({subkey: []})
                        for n, kdic in enumerate(lst):  # Each item is for a k
                            d['pert'][subkey].append({})
                            for item in pert:
                                if type(item) is list:
                                    d['pert'][subkey][n].update({item[0]: kdic[item[0]][item[1]]})
                                else:
                                    d['pert'][subkey][n].update({item: kdic[item]})

            for i in extra:
                exec('d[i] = self.cosmo.{}'.format(i))

            try:
                d['cl'] = self.__store_cl(self.cosmo.raw_cl())
            except CosmoSevereError:
                pass

            try:
                d['lcl'] = self.__store_cl(self.cosmo.lensed_cl())
            except CosmoSevereError:
                pass

            try:
                d['dcl'] = self.cosmo.density_cl()
            except CosmoSevereError:
                pass


            if ("output" in self.cosmo.pars) and ('mPk' in self.cosmo.pars['output']):
                k_array = np.linspace(*pk)
                pk_array = np.array([self.cosmo.pk(k, 0) for k in k_array])

                d['pk'] = {'k': k_array, 'pk': pk_array}

            self.cosmo.struct_cleanup()
Example #16
# coding: utf-8

# In[ ]:

# import classy module
from classy import Class


# In[ ]:

# create instance of the class "Class"
LambdaCDM = Class()
# pass input parameters
LambdaCDM.set({'omega_b':0.022032,'omega_cdm':0.12038,'h':0.67556,'A_s':2.215e-9,'n_s':0.9619,'tau_reio':0.0925})
LambdaCDM.set({'output':'tCl,pCl,lCl,mPk','lensing':'yes','P_k_max_1/Mpc':3.0})
# run class
LambdaCDM.compute()


# In[ ]:

# get all C_l output
cls = LambdaCDM.lensed_cl(2500)
# To check the format of cls
cls.keys()


# In[ ]:

ll = cls['ell'][2:]
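
A hedged continuation of this notebook cell, converting C_ell to D_ell and plotting the temperature spectrum (assumes numpy and matplotlib are available):

import numpy as np
import matplotlib.pyplot as plt

clTT = cls['tt'][2:]
plt.figure()
plt.xscale('log')
plt.plot(ll, ll * (ll + 1) / (2. * np.pi) * clTT, 'r-')
plt.xlabel(r'$\ell$')
plt.ylabel(r'$[\ell(\ell+1)/2\pi]\, C_\ell^{\mathrm{TT}}$')
plt.show()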
Example #17
    'tau_reio': 0.06298803397344085,
    'gauge': 'new',
    'modes': 's',
    'N_ncdm': 1,
    'deg_ncdm': 3,
    'T_ncdm': 0.71611,
    'N_eff': 0.00641,
    'Minfl': 1e+16,
    'output': 'mPk tCl lCl',
    'P_k_max_h/Mpc': 1
}

params = [
    params1, params2, params3, params4, params5, params6, params7, params8,
    params9, params10, params11, params12
]

f = open('timings.txt', 'w')

for param in params:
    tic = datetime.datetime.now()
    for x in range(1, 10):
        cosmo = Class()
        cosmo.set(param)
        cosmo.compute()
        cosmo.struct_cleanup()
        cosmo.empty()
    toc = datetime.datetime.now()
    print(toc - tic)
    f.write('{}\n'.format(toc - tic))
Example #18
    def get_theoretical_TT_TE_EE_unbinned_power_spec_D_ell(self, class_dict):
        ellmin = self.lmin_class
        ellmax = self.plmax
        cosmo = Class()
        cosmo.set(class_dict)
        cosmo.compute()
        cls = cosmo.lensed_cl(3000)
        cosmo.struct_cleanup()
        cosmo.empty()

        #get in units of microkelvin squared
        T_fac=(self.T_cmb*1e6)**2

        ell=cls['ell']
        D_fac=ell*(ell+1.)/(2*np.pi)

        Dltt=(T_fac*D_fac*cls['tt'])[ellmin:ellmax+1]
        Dlte=(T_fac*D_fac*cls['te'])[ellmin:ellmax+1]
        Dlee=(T_fac*D_fac*cls['ee'])[ellmin:ellmax+1]
        return cls['ell'][ellmin:ellmax+1], Dltt, Dlte, Dlee
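
A hedged, standalone version of the same conversion (lensed C_ell to D_ell in muK^2) using only documented classy calls; the input settings and multipole range are placeholders:

import numpy as np
from classy import Class

cosmo = Class()
cosmo.set({'output': 'tCl pCl lCl', 'lensing': 'yes'})
cosmo.compute()
cls = cosmo.lensed_cl(3000)
T_fac = (cosmo.T_cmb() * 1e6)**2          # CMB temperature in microkelvin, squared
cosmo.struct_cleanup()
cosmo.empty()

ell = cls['ell'][2:]                      # drop the unphysical ell = 0, 1 entries
D_fac = ell * (ell + 1.) / (2 * np.pi)
Dltt = T_fac * D_fac * cls['tt'][2:]      # D_ell^TT in muK^2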
Example #19
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk,
    etc.).

    """
    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()

        self.verbose = {
            'input_verbose': 1,
            'background_verbose': 1,
            'thermodynamics_verbose': 1,
            'perturbations_verbose': 1,
            'transfer_verbose': 1,
            'primordial_verbose': 1,
            'spectra_verbose': 1,
            'nonlinear_verbose': 1,
            'lensing_verbose': 1,
            'output_verbose': 1
        }
        self.scenario = {'lensing': 'yes'}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        del self.scenario

    @parameterized.expand(
        itertools.product((
            'LCDM',
            'Mnu',
            'Positive_Omega_k',
            'Negative_Omega_k',
            'Isocurvature_modes',
        ), ({
            'output': ''
        }, {
            'output': 'mPk'
        }, {
            'output': 'tCl'
        }, {
            'output': 'tCl pCl lCl'
        }, {
            'output': 'mPk tCl lCl',
            'P_k_max_h/Mpc': 10
        }, {
            'output': 'nCl sCl'
        }, {
            'output': 'tCl pCl lCl nCl sCl'
        }), ({
            'gauge': 'newtonian'
        }, {
            'gauge': 'sync'
        }), ({}, {
            'non linear': 'halofit'
        })))
    def test_wrapper_implementation(self, name, scenario, gauge, nonlinear):
        """Create a few instances based on different cosmologies"""
        if name == 'Mnu':
            self.scenario.update({'N_ncdm': 1, 'm_ncdm': 0.06})
        elif name == 'Positive_Omega_k':
            self.scenario.update({'Omega_k': 0.01})
        elif name == 'Negative_Omega_k':
            self.scenario.update({'Omega_k': -0.01})
        elif name == 'Isocurvature_modes':
            self.scenario.update({'ic': 'ad,nid,cdi', 'c_ad_cdi': -0.5})

        self.scenario.update(scenario)
        if scenario != {}:
            self.scenario.update(gauge)
        self.scenario.update(nonlinear)

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test case %s |\n' % name)
        sys.stderr.write('---------------------------------\n')
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set({**self.verbose, **self.scenario})
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_list = ['tCl', 'lCl', 'pCl', 'nCl', 'sCl']

        # Depending on the cases, the compute should fail or not
        should_fail = True
        output = self.scenario['output'].split()
        for elem in output:
            if elem in ['tCl', 'pCl']:
                for elem2 in output:
                    if elem2 == 'lCl':
                        should_fail = False
                        break

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(self.cosmo.state,
                        "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print('--> Class is ready')
        # Depending
        if 'output' in self.scenario.keys():
            # Positive tests
            output = self.scenario['output']
            for elem in output.split():
                if elem in cl_list:
                    print('--> testing raw_cl function')
                    cl = self.cosmo.raw_cl(100)
                    self.assertIsNotNone(cl, "raw_cl returned nothing")
                    self.assertEqual(
                        np.shape(cl['tt'])[0], 101,
                        "raw_cl returned wrong size")
                if elem == 'mPk':
                    print('--> testing pk function')
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_list for elem in output.split()]):
                print('--> testing absence of any Cl')
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if 'mPk' not in self.scenario['output'].split():
                print('--> testing absence of mPk')
                #args = (0.1, 0)
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

    @parameterized.expand(
        itertools.product(('massless', 'massive', 'both'),
                          ('photons', 'massless', 'exact'), ('t', 's, t')))
    def test_tensors(self, scenario, method, modes):
        """Test the new tensor mode implementation"""
        self.scenario = {}
        if scenario == 'massless':
            self.scenario.update({'N_eff': 3.046, 'N_ncdm': 0})
        elif scenario == 'massive':
            self.scenario.update({
                'N_eff': 0,
                'N_ncdm': 2,
                'm_ncdm': '0.03, 0.04',
                'deg_ncdm': '2, 1'
            })
        elif scenario == 'both':
            self.scenario.update({
                'N_eff': 1.5,
                'N_ncdm': 2,
                'm_ncdm': '0.03, 0.04',
                'deg_ncdm': '1, 0.5'
            })

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test case: %s %s %s |\n' %
                         (scenario, method, modes))
        sys.stderr.write('---------------------------------\n')
        self.scenario.update({
            'tensor method': method,
            'modes': modes,
            'output': 'tCl, pCl'
        })
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")
        self.cosmo.set({**self.verbose, **self.scenario})
        self.cosmo.compute()

    @parameterized.expand(
        zip(
            powerset(['100*theta_s', 'Omega_dcdmdr']),
            powerset([1.04, 0.20]),
        ))
    def test_shooting_method(self, variables, values):
        Omega_cdm = 0.25

        scenario = {
            'Omega_b': 0.05,
        }

        for variable, value in zip(variables, values):
            scenario.update({variable: value})

        if 'Omega_dcdmdr' in variables:
            scenario.update({
                'Gamma_dcdm': 100,
                'Omega_cdm': Omega_cdm - scenario['Omega_dcdmdr']
            })
        else:
            scenario.update({'Omega_cdm': Omega_cdm})

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test shooting: %s |\n' % (', '.join(variables)))
        sys.stderr.write('---------------------------------\n')
        for key, value in scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        scenario.update(self.verbose)
        self.assertTrue(self.cosmo.set(scenario),
                        "Class failed to initialise with this input")
        self.cosmo.compute()

        # Now, check that the values are properly extracted
        for variable, value in zip(variables, values):
            if variable == '100*theta_s':
                computed_value = self.cosmo.get_current_derived_parameters(
                    [variable])[variable]
                self.assertAlmostEqual(value, computed_value, places=5)
Example #20
class Sampler:
    def __init__(self, NSIDE):
        self.NSIDE = NSIDE
        self.Npix = 12 * NSIDE**2
        print("Initialising sampler")
        self.cosmo = Class()
        print("Maps")
        self.templates_map, self.templates_var = aggregate_pixels_params(
            get_pixels_params(self.NSIDE))
        print("betas")
        self.matrix_mean, self.matrix_var = aggregate_mixing_params(
            get_mixing_matrix_params(self.NSIDE))
        print("Cosmo params")
        self.cosmo_means = np.array(COSMO_PARAMS_MEANS)
        self.cosmo_var = (np.diag(COSMO_PARAMS_SIGMA) / 2)**2

        plt.hist(self.templates_map)
        plt.savefig("mean_values.png")
        plt.close()
        plt.hist(self.templates_var)
        plt.savefig("std_values.png")
        plt.close()
        self.instrument = pysm.Instrument(
            get_instrument('litebird', self.NSIDE))
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)
        print("End of initialisation")

    def __getstate__(self):
        state_dict = self.__dict__.copy()
        del state_dict["mixing_matrix_evaluator"]
        del state_dict["cosmo"]
        del state_dict["mixing_matrix"]
        del state_dict["components"]
        return state_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.cosmo = Class()
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)

    def sample_normal(self, mu, sigma, s=None):
        return np.random.multivariate_normal(mu, sigma, s)

    def sample_model_parameters(self):
        #sampled_cosmo = self.sample_normal(self.cosmo_means, self.cosmo_var)
        sampled_cosmo = np.array([
            0.9665, 0.02242, 0.11933, 1.04101, 3.047, 0.0561
        ]) - 2 * np.array(COSMO_PARAMS_SIGMA)
        #sampled_beta = self.sample_normal(self.matrix_mean, self.matrix_var).reshape((self.Npix, -1), order = "F")
        sampled_beta = self.matrix_mean.reshape((self.Npix, -1), order="F")
        return sampled_cosmo, sampled_beta

    def sample_CMB_QU(self, cosmo_params):
        params = {
            'output': OUTPUT_CLASS,
            'l_max_scalars': L_MAX_SCALARS,
            'lensing': LENSING
        }
        params.update(cosmo_params)
        self.cosmo.set(params)
        self.cosmo.compute()
        cls = self.cosmo.lensed_cl(L_MAX_SCALARS)
        eb_tb = np.zeros(shape=cls["tt"].shape)
        _, Q, U = hp.synfast(
            (cls['tt'], cls['ee'], cls['bb'], cls['te'], eb_tb, eb_tb),
            nside=self.NSIDE,
            new=True)
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        return Q, U

    def sample_mixing_matrix(self, betas):
        mat_pixels = []
        for i in range(self.Npix):
            m = self.mixing_matrix_evaluator(betas[i, :])
            mat_pixels.append(m)

        mixing_matrix = np.stack(mat_pixels, axis=0)
        return mixing_matrix

    def sample_model(self):
        cosmo_params, sampled_beta = self.sample_model_parameters()
        #maps = self.sample_normal(self.templates_map, self.templates_var)

        cosmo_dict = {
            l[0]: l[1]
            for l in zip(COSMO_PARAMS_NAMES, cosmo_params.tolist())
        }
        tuple_QU = self.sample_CMB_QU(cosmo_dict)
        map_CMB = np.stack(tuple_QU, axis=1)
        '''
        mixing_matrix = self.sample_mixing_matrix(sampled_beta)
        map_Sync = np.stack([maps[0:self.Npix], maps[self.Npix:2*self.Npix]], axis = 1)
        map_Dust = np.stack([maps[2*self.Npix:3*self.Npix], maps[3*self.Npix:]], axis = 1)
        entire_map = np.stack([map_CMB, map_Dust, map_Sync], axis = 1)

        dot_prod = []
        for j in range(self.Npix):
            m = np.dot(mixing_matrix[j, :, :], entire_map[j, :, :])
            dot_prod.append(m)

        sky_map = np.stack(dot_prod, axis = 0)
        '''
        sky_map = map_CMB

        return {
            "sky_map": sky_map,
            "cosmo_params": cosmo_params,
            "betas": sampled_beta
        }


#sampler = Sampler(NSIDE)
#r = sampler.sample_model(1)
#['beta_d' 'temp' 'beta_pl']
#['beta_d' 'temp']
#['beta_pl']
Example #21
class classy(BoltzmannBase):
    r"""
    CLASS cosmological Boltzmann code \cite{Blas:2011rf}.
    """
    # Name of the Class repo/folder and version to download
    _classy_repo_name = "lesgourg/class_public"
    _min_classy_version = "v2.9.3"
    _classy_min_gcc_version = "6.4"  # Lower ones are possible atm, but leak memory!
    _classy_repo_version = os.environ.get('CLASSY_REPO_VERSION',
                                          _min_classy_version)

    def initialize(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # Allow global import if no direct path specification
        allow_global = not self.path
        if not self.path and self.packages_path:
            self.path = self.get_path(self.packages_path)
        self.classy_module = self.is_installed(path=self.path,
                                               allow_global=allow_global)
        if not self.classy_module:
            raise NotInstalledError(
                self.log, "Could not find CLASS. Check error message above.")
        from classy import Class, CosmoSevereError, CosmoComputationError
        global CosmoComputationError, CosmoSevereError
        self.classy = Class()
        super().initialize()
        # Add general CLASS stuff
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (
                self.extra_args["sBBN file"].format(classy=self.path))
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []
        self.log.info("Initialized!")

    def must_provide(self, **requirements):
        # Computed quantities required by the likelihood
        super().must_provide(**requirements)
        for k, v in self._must_provide.items():
            # Products and other computations
            if k == "Cl":
                if any(("t" in cl.lower()) for cl in v):
                    self.extra_args["output"] += " tCl"
                if any(
                    (("e" in cl.lower()) or ("b" in cl.lower())) for cl in v):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                # For l_max_scalars, remember previous entries.
                self.extra_args["l_max_scalars"] = \
                    max(self.extra_args.get("l_max_scalars", 0), max(v.values()))
                self.collectors[k] = Collector(
                    method="lensed_cl",
                    kwargs={"lmax": self.extra_args["l_max_scalars"]})
                if 'T_cmb' not in self.derived_extra:
                    self.derived_extra += ['T_cmb']
            elif k == "Hubble":
                self.collectors[k] = Collector(method="Hubble",
                                               args=[np.atleast_1d(v["z"])],
                                               args_names=["z"],
                                               arg_array=0)
            elif k == "angular_diameter_distance":
                self.collectors[k] = Collector(method="angular_distance",
                                               args=[np.atleast_1d(v["z"])],
                                               args_names=["z"],
                                               arg_array=0)
            elif k == "comoving_radial_distance":
                self.collectors[k] = Collector(method="z_of_r",
                                               args_names=["z"],
                                               args=[np.atleast_1d(v["z"])])
            elif isinstance(k, tuple) and k[0] == "Pk_grid":
                self.extra_args["output"] += " mPk"
                v = deepcopy(v)
                self.add_P_k_max(v.pop("k_max"), units="1/Mpc")
                # NB: Actually, only the max z is used, and the actual sampling in z
                # for computing P(k,z) is controlled by `perturb_sampling_stepsize`
                # (default: 0.1). But let's leave it like this in case this changes
                # in the future.
                self.add_z_for_matter_power(v.pop("z"))

                if v["nonlinear"] and "non linear" not in self.extra_args:
                    self.extra_args["non linear"] = non_linear_default_code
                pair = k[2:]
                if pair == ("delta_tot", "delta_tot"):
                    v["only_clustering_species"] = False
                elif pair == ("delta_nonu", "delta_nonu"):
                    v["only_clustering_species"] = True
                else:
                    raise LoggedError(self.log, "NotImplemented in CLASS: %r",
                                      pair)
                self.collectors[k] = Collector(method="get_pk_and_k_and_z",
                                               kwargs=v,
                                               post=(lambda P, kk, z:
                                                     (kk, z, np.array(P).T)))
            elif isinstance(k, tuple) and k[0] == "sigma_R":
                raise LoggedError(
                    self.log,
                    "Classy sigma_R not implemented as yet - use CAMB only")
            elif v is None:
                k_translated = self.translate_param(k)
                if k_translated not in self.derived_extra:
                    self.derived_extra += [k_translated]
            else:
                raise LoggedError(self.log, "Requested product not known: %r",
                                  {k: v})
        # Derived parameters (if some need some additional computations)
        if any(("sigma8" in s) for s in self.output_params or requirements):
            self.extra_args["output"] += " mPk"
            self.add_P_k_max(1, units="1/Mpc")
        # Adding tensor modes if requested
        if self.extra_args.get("r") or "r" in self.input_params:
            self.extra_args["modes"] = "s,t"
        # If B spectrum with l>50, or lensing, recommend using Halofit
        cls = self._must_provide.get("Cl", {})
        has_BB_l_gt_50 = (any(("b" in cl.lower()) for cl in cls)
                          and max(cls[cl]
                                  for cl in cls if "b" in cl.lower()) > 50)
        has_lensing = any(("p" in cl.lower()) for cl in cls)
        if (has_BB_l_gt_50
                or has_lensing) and not self.extra_args.get("non linear"):
            self.log.warning(
                "Requesting BB for ell>50 or lensing Cl's: "
                "using a non-linear code is recommended (and you are not "
                "using any). To activate it, set "
                "'non_linear: halofit|hmcode|...' in classy's 'extra_args'.")
        # Cleanup of products string
        self.extra_args["output"] = " ".join(
            set(self.extra_args["output"].split()))
        self.check_no_repeated_input_extra()

    def add_z_for_matter_power(self, z):
        if getattr(self, "z_for_matter_power", None) is None:
            self.z_for_matter_power = np.empty(0)
        self.z_for_matter_power = np.flip(np.sort(
            np.unique(
                np.concatenate([self.z_for_matter_power,
                                np.atleast_1d(z)]))),
                                          axis=0)
        self.extra_args["z_pk"] = " ".join(
            ["%g" % zi for zi in self.z_for_matter_power])
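        # Illustrative note (added): e.g. calling add_z_for_matter_power([0., 0.5]) and
        # later add_z_for_matter_power(1.) leaves extra_args["z_pk"] == "1 0.5 0",
        # i.e. the union of all requested redshifts, sorted in descending order.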

    def add_P_k_max(self, k_max, units):
        r"""
        Unifies the treatment of :math:`k_\mathrm{max}` for the matter power
        spectrum: ``P_k_max_[1|h]/Mpc``.

        Pass ``units="1/Mpc"`` or ``units="h/Mpc"``.
        """
        # Fiducial h conversion (high, though it may slow the computations)
        h_fid = 1
        if units == "h/Mpc":
            k_max *= h_fid
        # Take into account possible manual set of P_k_max_***h/Mpc*** through extra_args
        k_max_old = self.extra_args.pop(
            "P_k_max_1/Mpc", h_fid * self.extra_args.pop("P_k_max_h/Mpc", 0))
        self.extra_args["P_k_max_1/Mpc"] = max(k_max, k_max_old)
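        # Illustrative note (added): with the fiducial h_fid = 1 used above,
        # add_P_k_max(0.5, units="h/Mpc") followed by add_P_k_max(1, units="1/Mpc")
        # leaves extra_args["P_k_max_1/Mpc"] == 1, i.e. the most demanding request wins.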

    def set(self, params_values_dict):
        # If no output requested, remove arguments that produce an error
        # (e.g. complaints if halofit requested but no Cl's computed.)
        # Needed for facilitating post-processing
        if not self.extra_args["output"]:
            for k in ["non linear"]:
                self.extra_args.pop(k, None)
        # Prepare parameters to be passed: this-iteration + extra
        args = {
            self.translate_param(p): v
            for p, v in params_values_dict.items()
        }
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.set(**args)

    def calculate(self, state, want_derived=True, **params_values_dict):
        # Set parameters
        self.set(params_values_dict)
        # Compute!
        try:
            self.classy.compute()
        # "Valid" failure of CLASS: parameters too extreme -> log and report
        except CosmoComputationError as e:
            if self.stop_at_error:
                self.log.error(
                    "Computation error (see traceback below)! "
                    "Parameters sent to CLASS: %r and %r.\n"
                    "To ignore this kind of error, make 'stop_at_error: False'.",
                    state["params"], dict(self.extra_args))
                raise
            else:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on. "
                               "The output of the CLASS error was %s" % e)
            return False
        # CLASS not correctly initialized, or input parameters not correct
        except CosmoSevereError:
            self.log.error(
                "Serious error setting parameters or computing results. "
                "The parameters passed were %r and %r. To see the original "
                "CLASS' error traceback, make 'debug: True'.", state["params"],
                self.extra_args)
            raise  # No LoggedError, so that CLASS traceback gets printed
        # Gather products
        for product, collector in self.collectors.items():
            # Special case: sigma8 needs H0, which cannot be known beforehand:
            if "sigma8" in self.collectors:
                self.collectors["sigma8"].args[0] = 8 / self.classy.h()
            method = getattr(self.classy, collector.method)
            arg_array = self.collectors[product].arg_array
            if arg_array is None:
                state[product] = method(*self.collectors[product].args,
                                        **self.collectors[product].kwargs)
            elif isinstance(arg_array, int):
                state[product] = np.zeros(
                    len(self.collectors[product].args[arg_array]))
                for i, v in enumerate(
                        self.collectors[product].args[arg_array]):
                    args = (
                        list(self.collectors[product].args[:arg_array]) + [v] +
                        list(self.collectors[product].args[arg_array + 1:]))
                    state[product][i] = method(
                        *args, **self.collectors[product].kwargs)
            elif arg_array in self.collectors[product].kwargs:
                value = np.atleast_1d(
                    self.collectors[product].kwargs[arg_array])
                state[product] = np.zeros(value.shape)
                for i, v in enumerate(value):
                    kwargs = deepcopy(self.collectors[product].kwargs)
                    kwargs[arg_array] = v
                    state[product][i] = method(*self.collectors[product].args,
                                               **kwargs)
            if collector.post:
                state[product] = collector.post(*state[product])
        # Prepare derived parameters
        d, d_extra = self._get_derived_all(derived_requested=want_derived)
        if want_derived:
            state["derived"] = {p: d.get(p) for p in self.output_params}
            # Prepare necessary extra derived parameters
        state["derived_extra"] = deepcopy(d_extra)

    def _get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``compute`` method).

        Parameter names are returned in CLASS nomenclature.

        To get a parameter *from a likelihood* use `get_param` instead.
        """
        # TODO: fails with derived_requested=False
        # Put all parameters in CLASS nomenclature (self.derived_extra already is)
        requested = [
            self.translate_param(p)
            for p in (self.output_params if derived_requested else [])
        ]
        requested_and_extra = dict.fromkeys(
            set(requested).union(set(self.derived_extra)))
        # Parameters with their own getters
        if "rs_drag" in requested_and_extra:
            requested_and_extra["rs_drag"] = self.classy.rs_drag()
        if "Omega_nu" in requested_and_extra:
            requested_and_extra["Omega_nu"] = self.classy.Omega_nu
        if "T_cmb" in requested_and_extra:
            requested_and_extra["T_cmb"] = self.classy.T_cmb()
        # Get the rest using the general derived param getter
        # No need for error control: classy.get_current_derived_parameters is passed
        # every derived parameter not excluded before, and will raise an error
        # indicating which parameters are not recognized
        requested_and_extra.update(
            self.classy.get_current_derived_parameters(
                [p for p, v in requested_and_extra.items() if v is None]))
        # Separate the parameters before returning
        # Remember: self.output_params is in sampler nomenclature,
        # but self.derived_extra is in CLASS
        derived = {
            p: requested_and_extra[self.translate_param(p)]
            for p in self.output_params
        }
        derived_extra = {p: requested_and_extra[p] for p in self.derived_extra}
        return derived, derived_extra

    def get_Cl(self, ell_factor=False, units="FIRASmuK2"):
        try:
            cls = deepcopy(self._current_state["Cl"])
        except (KeyError, TypeError):
            raise LoggedError(
                self.log,
                "No Cl's were computed. Are you sure that you have requested them?"
            )
        # unit conversion and ell_factor
        ells_factor = ((cls["ell"] + 1) * cls["ell"] /
                       (2 * np.pi))[2:] if ell_factor else 1
        units_factor = self._cmb_unit_factor(
            units, self._current_state['derived_extra']['T_cmb'])

        for cl in cls:
            if cl not in ['pp', 'ell']:
                cls[cl][2:] *= units_factor**2 * ells_factor
        if "pp" in cls and ell_factor:
            cls['pp'][2:] *= ells_factor**2 * (2 * np.pi)
        return cls

    def _get_z_dependent(self, quantity, z):
        try:
            z_name = next(k for k in ["redshifts", "z"]
                          if k in self.collectors[quantity].kwargs)
            computed_redshifts = self.collectors[quantity].kwargs[z_name]
        except StopIteration:
            computed_redshifts = self.collectors[quantity].args[
                self.collectors[quantity].args_names.index("z")]
        i_kwarg_z = np.concatenate(
            [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)])
        values = np.array(deepcopy(self._current_state[quantity]))
        if quantity == "comoving_radial_distance":
            values = values[0]
        return values[i_kwarg_z]

    def close(self):
        self.classy.empty()

    def get_can_provide_params(self):
        names = [
            'Omega_Lambda', 'Omega_cdm', 'Omega_b', 'Omega_m', 'rs_drag',
            'z_reio', 'YHe', 'Omega_k', 'age', 'sigma8'
        ]
        for name, mapped in self.renames.items():
            if mapped in names:
                names.append(name)
        return names

    def get_version(self):
        return getattr(self.classy_module, '__version__', None)

    # Installation routines

    @classmethod
    def get_path(cls, path):
        return os.path.realpath(os.path.join(path, "code", cls.__name__))

    @classmethod
    def get_import_path(cls, path):
        log = logging.getLogger(cls.__name__)
        classy_build_path = os.path.join(path, "python", "build")
        if not os.path.isdir(classy_build_path):
            log.error(
                "Either CLASS is not in the given folder, "
                "'%s', or you have not compiled it.", path)
            return None
        py_version = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
        try:
            post = next(d for d in os.listdir(classy_build_path)
                        if (d.startswith("lib.") and py_version in d))
        except StopIteration:
            log.error(
                "The CLASS installation at '%s' has not been compiled for the "
                "current Python version.", path)
            return None
        return os.path.join(classy_build_path, post)

    @classmethod
    def is_compatible(cls):
        import platform
        if platform.system() == "Windows":
            return False
        return True

    @classmethod
    def is_installed(cls, **kwargs):
        log = logging.getLogger(cls.__name__)
        if not kwargs.get("code", True):
            return True
        path = kwargs["path"]
        if path is not None and path.lower() == "global":
            path = None
        if path and not kwargs.get("allow_global"):
            log.info("Importing *local* CLASS from '%s'.", path)
            if not os.path.exists(path):
                log.error("The given folder does not exist: '%s'", path)
                return False
            classy_build_path = cls.get_import_path(path)
            if not classy_build_path:
                return False
        elif not path:
            log.info("Importing *global* CLASS.")
            classy_build_path = None
        else:
            log.info(
                "Importing *auto-installed* CLASS (but defaulting to *global*)."
            )
            classy_build_path = cls.get_import_path(path)
        try:
            return load_module('classy',
                               path=classy_build_path,
                               min_version=cls._classy_repo_version)
        except ImportError:
            if path is not None and path.lower() != "global":
                log.error(
                    "Couldn't find the CLASS python interface at '%s'. "
                    "Are you sure it has been installed there?", path)
            else:
                log.error(
                    "Could not import global CLASS installation. "
                    "Specify a Cobaya or CLASS installation path, "
                    "or install the CLASS Python interface globally with "
                    "'cd /path/to/class/python/ ; python setup.py install'")
            return False
        except VersionCheckError as e:
            log.error(str(e))
            return False

    @classmethod
    def install(cls,
                path=None,
                force=False,
                code=True,
                no_progress_bars=False,
                **kwargs):
        log = logging.getLogger(cls.__name__)
        if not code:
            log.info("Code not requested. Nothing to do.")
            return True
        log.info("Installing pre-requisites...")
        exit_status = pip_install("cython")
        if exit_status:
            log.error("Could not install pre-requisite: cython")
            return False
        log.info("Downloading classy...")
        success = download_github_release(os.path.join(path, "code"),
                                          cls._classy_repo_name,
                                          cls._classy_repo_version,
                                          repo_rename=cls.__name__,
                                          no_progress_bars=no_progress_bars,
                                          logger=log)
        if not success:
            log.error("Could not download classy.")
            return False
        # Compilation
        # gcc check after downloading, in case the user wants to change the compiler by
        # hand in the Makefile
        classy_path = cls.get_path(path)
        if not check_gcc_version(cls._classy_min_gcc_version,
                                 error_returns=False):
            log.error(
                "Your gcc version is too low! CLASS would probably compile, "
                "but it would leak memory when running a chain. Please use a "
                "gcc version newer than %s. You can still compile CLASS by hand, "
                "maybe changing the compiler in the Makefile. CLASS has been "
                "downloaded into %r", cls._classy_min_gcc_version, classy_path)
            return False
        log.info("Compiling classy...")
        from subprocess import Popen, PIPE
        env = deepcopy(os.environ)
        env.update({"PYTHON": sys.executable})
        process_make = Popen(["make"],
                             cwd=classy_path,
                             stdout=PIPE,
                             stderr=PIPE,
                             env=env)
        out, err = process_make.communicate()
        if process_make.returncode:
            log.info(out)
            log.info(err)
            log.error("Compilation failed!")
            return False
        return True
Example #22
        'background_verbose': 1,
        'thermodynamics_verbose': 1,
        'perturbations_verbose': 1,
        'primordial_verbose': 1,
        'spectra_verbose': 1,
        'nonlinear_verbose': 1,
        'output_verbose': 1,
        'transfer_verbose': 1,
        'tau_reio': 0.079,
        'headers': 'yes',
        'modes': 's',
        'k_output_values': 0.0001 + i * 0.0005,
        'lensing_verbose': 1,
        'radiation_streaming_trigger_tau_over_tau_k': 10000
    }
    cosmo = Class()
    cosmo.set(params)
    cosmo.compute()
    #	kval = 0.0001 + i/(10000*1.0)
    kval = 0.0001 + i * 0.0005
    data = cosmo.get_perturbations()['scalar']

    print(kval)

    print(data[0]['a'])
    print(len(data[0]['a']))

    out = np.zeros(shape=(len(data[0]['a']), 4))

    out[:, 0] = kval
    out[:, 1] = 1.0 / data[0]['a'] - 1
Example #23
from classy import Class
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
from scipy.integrate import odeint
import math
import matplotlib
import matplotlib.pyplot as plt


font = {'size'   : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]



M = Class()
# Table I of 1908.06995, third column, best-fit values
# Note: f and m found by trial-and-error to give the best-fit fEDE=.12, zc=10^3.562=3647.
M.set({'f_scf': 3.98e+26, 'm_scf': 5.31e-28, 'thetai_scf': 2.83,
       'A_s': 2.215e-09, 'n_s': 0.9889, '100*theta_s': 1.04152,
       'omega_b': 0.02253, 'omega_cdm': 0.1306, 'm_ncdm': 0.06,
       'tau_reio': 0.072})


# 'non linear': can be either 'halofit' or 'HMCODE'
M.set({'non linear': 'HMCODE', 'N_ncdm': 1, 'N_ur': 2.0328,
       'Omega_Lambda': 0.0, 'Omega_fld': 0, 'Omega_scf': -1,
       'n_scf': 3, 'CC_scf': 1, 'scf_parameters': '1, 1, 1, 1, 1, 0.0',
       'scf_tuning_index': 3, 'attractor_ic_scf': 'no',
       'output': 'tCl pCl lCl mPk', 'lensing': 'yes',
       'l_max_scalars': 2508, 'P_k_max_h/Mpc': 20, 'z_max_pk': 4.})
M.compute()

print(M.Omega_m())


baM = M.get_background()

fEDE = M.fEDE()
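
# Follow-up sketch (added, not in the original script): a quick look at the
# background table returned by get_background(). The column labels 'z' and
# 'H [1/Mpc]' are the standard CLASS background keys and are assumed here.
plt.loglog(1. + baM['z'], baM['H [1/Mpc]'])
plt.xlabel(r'$1+z$')
plt.ylabel(r'$H\,[\mathrm{Mpc}^{-1}]$')
plt.show()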
Example #24
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk, etc.).

    """

    @classmethod
    def setUpClass(self):
        self.faulty_figs_path = os.path.join(
            os.path.sep.join(os.path.realpath(__file__).split(os.path.sep)[:-1]), "faulty_figs"
        )

        if os.path.isdir(self.faulty_figs_path):
            shutil.rmtree(self.faulty_figs_path)

        os.mkdir(self.faulty_figs_path)

    @classmethod
    def tearDownClass(self):
        pass

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()
        self.cosmo_newt = Class()

        self.verbose = {
            "input_verbose": 1,
            "background_verbose": 1,
            "thermodynamics_verbose": 1,
            "perturbations_verbose": 1,
            "transfer_verbose": 1,
            "primordial_verbose": 1,
            "spectra_verbose": 1,
            "nonlinear_verbose": 1,
            "lensing_verbose": 1,
            "output_verbose": 1,
        }
        self.scenario = {}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        self.cosmo_newt.struct_cleanup()
        self.cosmo_newt.empty()
        del self.scenario

    def poormansname(self, somedict):
        string = "_".join([k + "=" + str(v) for k, v in somedict.items()])
        string = string.replace("/", "%")
        string = string.replace(",", "")
        string = string.replace(" ", "")
        return string

    @parameterized.expand(TUPLE_ARRAY)
    def test_0wrapper_implementation(self, inputdict):
        """Create a few instances based on different cosmologies"""
        self.scenario.update(inputdict)

        self.name = self.poormansname(inputdict)

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test case %s |\n" % self.name)
        sys.stderr.write("---------------------------------\n")
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
            sys.stdout.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set(dict(list(self.verbose.items()) + list(self.scenario.items())))
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_dict = {"tCl": ["tt"], "lCl": ["pp"], "pCl": ["ee", "bb"]}
        density_cl_list = ["nCl", "sCl"]

        # 'lensing' is always set to yes. Therefore, trying to compute 'tCl' or
        # 'pCl' will fail unless we also ask for 'lCl'. The flag
        # 'should_fail' stores this status.
        sys.stderr.write("Should")
        should_fail = self.test_incompatible_input()
        if should_fail:
            sys.stderr.write(" fail...\n")
        else:
            sys.stderr.write(" not fail...\n")

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(self.cosmo.state, "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print("--> Class is ready")
        # Depending on the requested output, run the relevant checks
        if "output" in self.scenario.keys():
            # Positive tests of raw cls
            output = self.scenario["output"]
            for elem in output.split():
                if elem in cl_dict.keys():
                    for cl_type in cl_dict[elem]:
                        sys.stderr.write("--> testing raw_cl for %s\n" % cl_type)
                        cl = self.cosmo.raw_cl(100)
                        self.assertIsNotNone(cl, "raw_cl returned nothing")
                        self.assertEqual(np.shape(cl[cl_type])[0], 101, "raw_cl returned wrong size")
                    # TODO do the same for lensed if 'lCl' is there, and for
                    # density cl
                if elem == "mPk":
                    sys.stderr.write("--> testing pk function\n")
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_dict.keys() for elem in output.split()]):
                sys.stderr.write("--> testing absence of any Cl\n")
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if "mPk" not in output.split():
                sys.stderr.write("--> testing absence of mPk\n")
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

        if COMPARE_OUTPUT:
            # Now, compute with Newtonian gauge, and compare the results
            self.cosmo_newt.set(dict(list(self.verbose.items()) + list(self.scenario.items())))
            self.cosmo_newt.set({"gauge": "newtonian"})
            self.cosmo_newt.compute()
            # Check that the computation worked
            self.assertTrue(self.cosmo_newt.state, "Class failed to go through all __init__ methods in Newtonian gauge")

            self.compare_output(self.cosmo, self.cosmo_newt)

    def test_incompatible_input(self):

        should_fail = False

        # If we have tensor modes, we must have one tensor observable,
        # either tCl or pCl.
        if has_tensor(self.scenario):
            if "output" not in self.scenario.keys():
                should_fail = True
            else:
                output = self.scenario["output"].split()
                if "tCl" not in output and "pCl" not in output:
                    should_fail = True

        # If we have specified lensing, we must have lCl in output,
        # otherwise lensing will not be read (which is an error).
        if "lensing" in self.scenario.keys():
            if "output" not in self.scenario.keys():
                should_fail = True
            else:
                output = self.scenario["output"].split()
                if "lCl" not in output:
                    should_fail = True
                elif "tCl" not in output and "pCl" not in output:
                    should_fail = True

        # If we have specified a tensor method, we must have tensors.
        if "tensor method" in self.scenario.keys():
            if not has_tensor(self.scenario):
                should_fail = True

        # If we have specified non linear, we must have some form of
        # perturbations output.
        if "non linear" in self.scenario.keys():
            if "output" not in self.scenario.keys():
                should_fail = True

        # If we ask for Cl's of lensing potential, we must have scalar modes.
        if "output" in self.scenario.keys() and "lCl" in self.scenario["output"].split():
            if "modes" in self.scenario.keys() and self.scenario["modes"].find("s") == -1:
                should_fail = True

        # If we specify initial conditions (for scalar modes), we must have
        # perturbations and scalar modes.
        if "ic" in self.scenario.keys():
            if "modes" in self.scenario.keys() and self.scenario["modes"].find("s") == -1:
                should_fail = True
            if "output" not in self.scenario.keys():
                should_fail = True

        # If we use inflation module, we must have scalar modes,
        # tensor modes, no vector modes and we should only have adiabatic IC:
        if "P_k_ini type" in self.scenario.keys() and self.scenario["P_k_ini type"].find("inflation") != -1:
            if "modes" not in self.scenario.keys():
                should_fail = True
            else:
                if self.scenario["modes"].find("s") == -1:
                    should_fail = True
                if self.scenario["modes"].find("v") != -1:
                    should_fail = True
                if self.scenario["modes"].find("t") == -1:
                    should_fail = True
            if "ic" in self.scenario.keys() and self.scenario["ic"].find("i") != -1:
                should_fail = True

        return should_fail

    def compare_output(self, reference, candidate):
        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Comparing synch and Newt: |\n")
        sys.stderr.write("---------------------------------\n")

        for elem in ["raw_cl", "lensed_cl", "density_cl"]:
            # Try to get the elem, but if it was not computed, a
            # CosmoSevereError should be raised. In this case, ignore the
            # whole block.
            try:
                to_test = getattr(candidate, elem)()
            except CosmoSevereError:
                continue
            ref = getattr(reference, elem)()
            for key, value in ref.items():
                if key != "ell":
                    sys.stderr.write("--> testing equality of %s %s\n" % (elem, key))
                    # For all self spectra, try to compare allclose
                    if key[0] == key[1]:
                        # If it is a 'dd' or 'll', it is a dictionary.
                        if isinstance(value, dict):
                            for subkey in value:
                                try:
                                    np.testing.assert_allclose(
                                        value[subkey], to_test[key][subkey], rtol=1e-03, atol=1e-20
                                    )
                                except AssertionError:
                                    self.cl_faulty_plot(elem + "_" + key, value[subkey][2:], to_test[key][subkey][2:])
                                except TypeError:
                                    self.cl_faulty_plot(elem + "_" + key, value[subkey][2:], to_test[key][subkey][2:])
                        else:
                            try:
                                np.testing.assert_allclose(value, to_test[key], rtol=1e-03, atol=1e-20)
                            except AssertionError:
                                self.cl_faulty_plot(elem + "_" + key, value[2:], to_test[key][2:])
                            except TypeError:
                                self.cl_faulty_plot(elem + "_" + key, value[2:], to_test[key][2:])
                    # For cross-spectra, as there can be zero-crossing, we
                    # instead compare the difference.
                    else:
                        # First, we multiply each array by the biggest value
                        norm = max(np.abs(value).max(), np.abs(to_test[key]).max())
                        value *= norm
                        to_test[key] *= norm
                        try:
                            np.testing.assert_array_almost_equal(value, to_test[key], decimal=3)
                        except AssertionError:
                            self.cl_faulty_plot(elem + "_" + key, value[2:], to_test[key][2:])

        if "output" in self.scenario.keys():
            if self.scenario["output"].find("mPk") != -1:
                sys.stderr.write("--> testing equality of Pk\n")
                k = np.logspace(-2, log10(self.scenario["P_k_max_1/Mpc"]))
                reference_pk = np.array([reference.pk(elem, 0) for elem in k])
                candidate_pk = np.array([candidate.pk(elem, 0) for elem in k])
                try:
                    np.testing.assert_allclose(reference_pk, candidate_pk, rtol=5e-03, atol=1e-20)
                except AssertionError:
                    self.pk_faulty_plot(k, reference_pk, candidate_pk)

    def cl_faulty_plot(self, cl_type, reference, candidate):
        path = os.path.join(self.faulty_figs_path, self.name)

        fig = plt.figure()
        ax_lin = plt.subplot(211)
        ax_log = plt.subplot(212)
        ell = np.arange(max(np.shape(candidate))) + 2
        ax_lin.plot(ell, 1 - candidate / reference)
        ax_log.loglog(ell, abs(1 - candidate / reference))

        ax_lin.set_xlabel("l")
        ax_log.set_xlabel("l")
        ax_lin.set_ylabel("1-candidate/reference")
        ax_log.set_ylabel("abs(1-candidate/reference)")

        ax_lin.set_title(self.name)
        ax_log.set_title(self.name)

        ax_lin.legend([cl_type])
        ax_log.legend([cl_type])

        fig.savefig(path + "_" + cl_type + ".pdf")

        # Store parameters (contained in self.scenario) to text file
        parameters = dict(list(self.verbose.items()) + list(self.scenario.items()))
        with open(path + ".ini", "w") as param_file:
            for key, value in parameters.items():
                param_file.write(key + " = " + str(value) + "\n")

    def pk_faulty_plot(self, k, reference, candidate):
        path = os.path.join(self.faulty_figs_path, self.name)

        fig = plt.figure()
        ax_lin = plt.subplot(211)
        ax_log = plt.subplot(212)
        ax_lin.plot(k, 1 - candidate / reference)
        ax_log.loglog(k, abs(1 - candidate / reference))

        ax_lin.set_xlabel("k")
        ax_log.set_xlabel("k")
        ax_lin.set_ylabel("1-candidate/reference")
        ax_log.set_ylabel("abs(1-candidate/reference)")

        ax_lin.set_title(self.name)
        ax_log.set_title(self.name)

        ax_lin.legend(["$P_k$"])
        ax_log.legend(["$P_k$"])

        fig.savefig(path + "_" + "pk" + ".pdf")

        # Store parameters (contained in self.scenario) to text file
        parameters = dict(list(self.verbose.items()) + list(self.scenario.items()))
        with open(path + ".ini", "w") as param_file:
            for key, value in parameters.items():
                param_file.write(key + " = " + str(value) + "\n")
Example #25
File: model.py  Project: bengranett/synmock
class ModelPk(object):
    """ """
    logger = logging.getLogger(__name__)

    def __init__(self, **param_dict):
        """ """
        self.params = params.copy()

        self.class_params = class_params.copy()
        self.set(**param_dict)  #other params for CLASS

        self._cosmo = None
        self._redshift_func = None
        self._logpk_func = None

        if self.params['mpk'] is not None:
            self.load_pk_from_file(self.params['mpk'])

    def load_pk_from_file(self, path):
        """ """
        self.logger.info(f"Loading matter power spectrum from file {path}")
        k, pk = np.loadtxt(path, unpack=True)
        self.logger.info(f"min k: {k.min()}, max k: {k.max()}, steps {len(k)}")
        pk *= self.params['bias']**2
        lk = np.log(k)
        lpk = np.log(pk)
        self._logpk_func = interpolate.interp1d(lk,
                                                lpk,
                                                bounds_error=False,
                                                fill_value=(0, 0))

    def set(self, **param_dict):
        """ """
        for key, value in param_dict.items():
            if key == 'non_linear':
                key = 'non linear'
            self.logger.debug("Set %s=%s", key, value)
            found = False
            if key in self.class_params:
                self.class_params[key] = value
                found = True
            if key in self.params:
                self.params[key] = value
                found = True
            if not found:
                continue

    @property
    def cosmo(self):
        """ """
        if not self._cosmo:
            self._cosmo = Class()
            self._cosmo.set(self.class_params)

            self.logger.info("Initializing Class")
            self._cosmo.compute()

            if self.params['fix_sigma8']:
                sig8 = self._cosmo.sigma8()
                A_s = self._cosmo.pars['A_s']
                self._cosmo.struct_cleanup()
                # renormalize to fix sig8
                self.A_s = A_s * (self.params['sigma8'] * 1. / sig8)**2
                self._cosmo.set(A_s=self.A_s)
                self._cosmo.compute()

            sig8 = self._cosmo.sigma8()

            self.params['sigma8'] = sig8
            self.params['A_s'] = self._cosmo.pars['A_s']
            self.params[
                'sigma8z'] = sig8 * self._cosmo.scale_independent_growth_factor(
                    self.class_params['z_pk'])
            self.params['f'] = self._cosmo.scale_independent_growth_factor_f(
                self.class_params['z_pk'])

            self.logger.info(f"          z: {self.class_params['z_pk']}")
            self.logger.info(f"    sigma_8: {self.params['sigma8']}")
            self.logger.info(f" sigma_8(z): {self.params['sigma8z']}")
            self.logger.info(f"       f(z): {self.params['f']}")
            self.logger.info(
                f"f sigma8(z): {self.params['f']*self.params['sigma8z']}")
        return self._cosmo

    def class_pk(self, k):
        """ """
        self.logger.info("Computing power spectrum with Class")

        z = np.array([self.class_params['z_pk']]).astype('d')

        shape = k.shape
        k = k.flatten()

        nk = len(k)
        k = np.reshape(k, (nk, 1, 1))

        pk = self.cosmo.get_pk(k * self.class_params['h'], z, nk, 1,
                               1).reshape((nk, ))
        k = k.reshape((nk, ))

        # set h units
        pk *= self.class_params['h']**3

        pk *= self.params['bias']**2

        pk = pk.reshape(shape)

        return pk

    def get_pk(self, k):
        ii = k > 0
        out = np.zeros(k.shape, dtype='d')
        out[ii] = np.exp(self.logpk_func(np.log(k[ii])))
        return out

    @property
    def logpk_func(self):
        if self._logpk_func is None:
            k = np.logspace(self.params['log_kmin'], self.params['log_kmax'],
                            self.params['log_ksteps'])
            pk = self.class_pk(k)
            lk = np.log(k)
            lpk = np.log(pk)
            print("logk min", lk.min())
            self._logpk_func = interpolate.interp1d(lk,
                                                    lpk,
                                                    bounds_error=False,
                                                    fill_value=(0, 0))
        return self._logpk_func

    def comoving_distance(self, z):
        """ """
        return (1 +
                z) * self.cosmo.angular_distance(z) * self.class_params['h']

    def redshift_at_comoving_distance(self, r):
        """ """
        try:
            z = 10**self.redshift_func(r) - 1
        except ValueError:
            self.logger.error(f"r min {r.min()} max {r.max()}")
            raise
        return z

    @property
    def redshift_func(self):
        """ """
        if self._redshift_func is None:
            zz = np.logspace(self.params['logzmin'], self.params['logzmax'],
                             self.params['logzsteps']) - 1
            r = np.zeros(len(zz))
            for i, z in enumerate(zz):
                r[i] = self.comoving_distance(z)
            self._redshift_func = interpolate.interp1d(r, np.log10(1 + zz))
        return self._redshift_func
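
# Hypothetical usage sketch (added): assumes the module-level `params` and
# `class_params` defaults provide entries such as 'bias', 'mpk', 'fix_sigma8',
# 'log_kmin'/'log_kmax'/'log_ksteps' and 'z_pk', as the code above suggests.
model = ModelPk(z_pk=0.5, bias=1.5, mpk=None, fix_sigma8=False)
k = np.logspace(-2, 0, 50)   # wavenumbers in h/Mpc
pk = model.get_pk(k)         # biased matter power spectrum in (Mpc/h)^3
print(pk[:5])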
Example #26
class classy(SlikPlugin):
    """
    Plugin for CLASS.

    Credit: Brent Follin, Teresa Hamill
    """

    #{cosmoslik name : class name}
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot'}


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 tau,
                 omnuh2, #0.006
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 massive_neutrinos=1,
                 massless_neutrinos=2.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.002,
                 outputs=[],
                 **kwargs):


        
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       **{self.name_mapping[k]:v for k,v in locals().items() 
                          if k in self.name_mapping and v is not None})
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
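
# Hypothetical call sketch (added): the parameter values below are illustrative
# only and are mapped onto CLASS names through `name_mapping` above.
p = classy()
cmb = p(ombh2=0.02237, omch2=0.1200, H0=67.4, As=2.1e-9, ns=0.965,
        tau=0.054, omnuh2=0.0006)
print(cmb['cl_TT'][:5])   # lensed TT spectrum as D_ell in muK^2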
Example #27
    'k_per_decade_for_bao': k_per_decade,
    'k_min_tau0':
    k_min_tau0,  # this value controls the minimum k value in the figure
    'perturb_sampling_stepsize': '1',
    'P_k_max_1/Mpc': P_k_max_inv_Mpc,
    'compute damping scale':
    'yes',  # needed to output and plot Silk damping scale
    'gauge': 'newtonian'
}

###############
#
# call CLASS
#
###############
M = Class()
M.set(common_settings)
M.compute()

#
# define conformal time sampling array
#
#times = M.get_current_derived_parameters(['tau_rec','conformal_age'])
#tau_rec=times['tau_rec']
#tau_0 = times['conformal_age']
#tau1 = np.logspace(math.log10(tau_ini),math.log10(tau_rec),tau_num_early)
#tau2 = np.logspace(math.log10(tau_rec),math.log10(tau_0),tau_num_late)[1:]
#tau2[-1] *= 0.999 # this tiny shift avoids interpolation errors
#tau = np.concatenate((tau1,tau2))
#tau_num = len(tau)
#
Example #28
def test_class_setup():
    cosmology = astropy.cosmology.Planck13
    assert cosmology.Om0 == cosmology.Odm0 + cosmology.Ob0
    assert 1 == (cosmology.Om0 + cosmology.Ode0 + cosmology.Ok0 +
                 cosmology.Ogamma0 + cosmology.Onu0)
    class_parameters = get_class_parameters(cosmology)
    try:
        from classy import Class
        cosmo = Class()
        cosmo.set(class_parameters)
        cosmo.compute()
        assert cosmo.h() == cosmology.h
        assert cosmo.T_cmb() == cosmology.Tcmb0.value
        assert cosmo.Omega_b() == cosmology.Ob0
        # Calculate Omega(CDM)_0 two ways:
        assert abs((cosmo.Omega_m() - cosmo.Omega_b()) -
                   (cosmology.Odm0 - cosmology.Onu0)) < 1e-8
        assert abs(cosmo.Omega_m() - (cosmology.Om0 - cosmology.Onu0)) < 1e-8
        # CLASS calculates Omega_Lambda itself so this is a non-trivial test.
        calculated_Ode0 = cosmo.get_current_derived_parameters(
            ['Omega_Lambda'])['Omega_Lambda']
        assert abs(calculated_Ode0 - (cosmology.Ode0 + cosmology.Onu0)) < 1e-5
        cosmo.struct_cleanup()
        cosmo.empty()
    except ImportError:
        pass
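
# Minimal sketch (added; an assumption, not the original helper) of the kind of
# astropy -> CLASS translation that get_class_parameters() must perform for the
# assertions above to hold.
def get_class_parameters_sketch(cosmology):
    return {
        'H0': cosmology.H0.value,
        'T_cmb': cosmology.Tcmb0.value,
        'Omega_b': cosmology.Ob0,
        # keep massive neutrinos out of the CDM budget, as tested above
        'Omega_cdm': cosmology.Odm0 - cosmology.Onu0,
        'N_ur': cosmology.Neff,
    }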
Example #29
class TransitionProbabilities:
    def __init__(self, cosmo=None, z_reio=7.82, z_recomb=1089.80):
        """
        Container class to compute dark photon transition probabilities.

        :param cosmo: Cosmology as an astropy object. If `None`, defaults to Planck18 cosmology.
        :param z_reio: Redshift of reionization. By default consistent with Planck18 cosmology.
        :param z_recomb: Redshift of recombination. By default consistent with Planck18 cosmology.
        """

        # Set constants
        self.xi_3 = 1.20205  # Apéry's constant, from Wikipedia
        self.eta = 6.129e-10  # Baryon-to-photon ratio, from 1912.01132
        self.Y_p = 0.247  # Primordial helium mass fraction, from 1912.01132
        self.T_0 = 2.725  # CMB temperature today, from 0911.1955
        self.T_0_n = (c.k_B * self.T_0 * u.Kelvin).to(u.eV).value * \
            eV  # CMB temperature today, in natural units

        # Set cosmology
        if cosmo is None:
            self.cosmo = self.get_Planck18_cosmology()
        else:
            self.cosmo = cosmo

        # Set redshift of reionization
        self.z_reio = z_reio
        self.z_recomb = z_recomb

        # Initialize CLASS instance to compute ionization fraction from
        self.initialize_class_inst()

    def get_Planck18_cosmology(self):
        """
        Stolen from https://github.com/abatten/fruitbat/blob/c074abff432c3b267d00fbb49781a0e0c6eeab75/fruitbat/cosmologies.py
        Planck 2018 paper VI Table 2 Final column (68% confidence interval)
        This is the Planck 2018 cosmology that will be added to Astropy when the
        paper is accepted.

        :return: astropy.cosmology.FlatLambdaCDM instance describing Planck18 cosmology
        """

        planck18_cosmology = {
            'Oc0':
            0.2607,
            'Ob0':
            0.04897,
            'Om0':
            0.3111,
            'H0':
            67.66,
            'n':
            0.9665,
            'sigma8':
            0.8102,
            'tau':
            0.0561,
            'z_reion':
            7.82,
            't0':
            13.787,
            'Tcmb0':
            2.7255,
            'Neff':
            3.046,
            'm_nu': [0., 0., 0.06],
            'z_recomb':
            1089.80,
            'reference':
            "Planck 2018 results. VI. Cosmological Parameters, "
            "A&A, submitted, Table 2 (TT, TE, EE + lowE + lensing + BAO)"
        }

        Planck18 = FlatLambdaCDM(H0=planck18_cosmology['H0'],
                                 Om0=planck18_cosmology['Om0'],
                                 Tcmb0=planck18_cosmology['Tcmb0'],
                                 Neff=planck18_cosmology['Neff'],
                                 Ob0=planck18_cosmology['Ob0'],
                                 name="Planck18",
                                 m_nu=u.Quantity(planck18_cosmology['m_nu'],
                                                 u.eV))

        return Planck18

    def initialize_class_inst(self):
        """ Get electron ionization fraction from CLASS
        """
        class_parameters = {
            'H0': self.cosmo.H0.value,
            'Omega_b': self.cosmo.Ob0,
            'N_ur': self.cosmo.Neff,
            'Omega_cdm': self.cosmo.Odm0,
            'YHe': self.Y_p,
            'z_reio': self.z_reio
        }

        self.CLASS_inst = Class()
        self.CLASS_inst.set(class_parameters)
        self.CLASS_inst.compute()

        # z_ary = np.logspace(-3, 5, 100000)
        z_ary = np.linspace(0, 33000., 300000)
        x_e_ary = [self.CLASS_inst.ionization_fraction(z) for z in z_ary]
        self.x_e = interp1d(z_ary, x_e_ary)

    def m_A_sq(self, z, omega, x_e=None):
        """ Effective photon plasma mass squared, in natural units, from 1507.02614

            :param z: Redshift
            :param omega: Photon frequency
            :param x_e: Free electron fraction if not default (optional)
            :return: Effective photon plasma mass squared, in natural units
        """
        if x_e is None:
            x_e = self.x_e(z)

        m_A_sq = 1.4e-21 * (x_e - 6e-3 * (omega / eV)**2 *
                            (1 - x_e)) * (self.n_H(z) / Centimeter**-3)

        return m_A_sq * eV**2  # Convert to natural units

    def n_p(self, z):
        """ Proton number density at redshift `z`, in natural units, from 1507.02614
        """
        return (1 - self.Y_p / 2.) * self.eta * 2 * self.xi_3 / np.pi**2 * (
            self.T_0_n * (1 + z))**3

    def n_H(self, z):
        """ Number density of hydrogen nuclei at redshift `z`, in natural units
        """
        return (1 -
                self.Y_p) * self.eta * 2 * self.xi_3 / np.pi**2 * (self.T_0_n *
                                                                   (1 + z))**3

    def n_b(self, z):
        """ Baryon number density at redshift `z`, in natural units.
        """
        return self.eta * 2 * self.xi_3 / np.pi**2 * (self.T_0_n * (1 + z))**3

    def dz_dt(self, z):
        """ dz/dt
        """
        return -self.cosmo.H(z).value * Kmps / Mpc * (1 + z)

    def omega(self, omega_0, z, evolve_z=True):
        """ Frequency corresponding to present-day `omega_0` evolved to `z` if `evolve_z` is `True`;
            otherwise just return `omega_0`.
        """
        if evolve_z:
            return omega_0 * (1 + z)
        else:
            return omega_0

    def get_z_crossings(self, m_A, omega_0, evolve_z=True):
        """
        Find redshifts at which resonance occurs

        :param m_A: Dark photon mass
        :param omega_0: Present-day frequency
        :param evolve_z: Whether to evolve frequency in redshift.
        :return: Array of redshifts at which resonance occurs
        """

        z_ary = np.logspace(-3, 4.5, 20000)

        m_A_ary = np.nan_to_num(np.sqrt(
            self.m_A_sq(z_ary, self.omega(omega_0, z_ary, evolve_z))),
                                nan=1e-18 * eV)

        where_ary = np.where(
            np.logical_or((m_A_ary[:-1] < m_A) * (m_A_ary[1:] > m_A),
                          (m_A_ary[:-1] > m_A) * (m_A_ary[1:] < m_A)))

        def m_A_sq(z):
            return np.nan_to_num(np.sqrt(
                self.m_A_sq(z, self.omega(omega_0, z, evolve_z))),
                                 nan=1e-18 * eV) - m_A

        z_cross_ary = []
        for i in range(len(where_ary[0])):
            z_cross_ary.append(
                brentq(m_A_sq, z_ary[where_ary[0][i]],
                       z_ary[where_ary[0][i] + 1]))

        return np.array(z_cross_ary)

    def P_trans(self,
                m_A,
                z_res_ary,
                omega_0,
                eps,
                evolve_z=True,
                approx_linearize=True):
        """
        Photon transition probability

        :param m_A: Dark photon mass
        :param z_res_ary: Array of resonance redshifts
        :param omega_0: Photon frequency (present day if `approx_linearize`, otherwise absolute)
        :param eps: Kinetic mixing coupling
        :param evolve_z: Whether to evolve `omega_0` in redshift
        :param approx_linearize: Linearize probability in `epsilon`
        :return: Transition probability array at redshifts `z_res_ary`
        """

        d_log_m_A_sq_dz = np.array([
            derivative(lambda z: np.log(
                self.m_A_sq(z=z, omega=self.omega(omega_0, z, evolve_z))),
                       x0=z,
                       dx=1e-7) for z in z_res_ary
        ])

        omega_res_ary = self.omega(omega_0, z_res_ary, evolve_z)

        if approx_linearize:
            P_homo = np.pi * m_A ** 2 * eps ** 2 / omega_res_ary * \
                np.abs((d_log_m_A_sq_dz * self.dz_dt(z_res_ary))) ** -1
        else:
            r = np.abs((d_log_m_A_sq_dz * self.dz_dt(z_res_ary)))**-1
            k = m_A**2 / (2 * omega_res_ary)
            P_homo = 1 - np.exp(-2 * np.pi * r * k * np.sin(eps)**2)

        return np.nan_to_num(P_homo)

    def P_tot(self,
              omega_0,
              eps,
              m_A,
              approx_linearize=True,
              evolve_z=True,
              sum_probs=False,
              **kwargs):
        """
        Total conversion probability in the homogeneous limit

        :param omega_0: Present-day photon frequency
        :param eps: Dark photon coupling
        :param m_A: Dark photon mass
        :param approx_linearize: Whether to use linearized probability approximation
        :param evolve_z: Whether to evolve frequency in redshift. 
        :param sum_probs: Whether to sum over probabilities associated with different z
        :return: Resonance redshift array, a constant placeholder (1.), and the transition probability array
        """

        # Find redshift at which resonance occurs
        z_res_ary = [
            self.get_z_crossings(m_A, omega, evolve_z) for omega in omega_0
        ]

        # Get transition probabilities at resonance

        if sum_probs:
            P_ary = np.array([
                np.nansum(
                    self.P_trans(m_A,
                                 z,
                                 omega,
                                 eps,
                                 approx_linearize=approx_linearize,
                                 evolve_z=evolve_z))
                for z, omega in zip(z_res_ary, omega_0)
            ])
        else:
            P_ary = np.array([(self.P_trans(m_A,
                                            z,
                                            omega,
                                            eps,
                                            approx_linearize=approx_linearize,
                                            evolve_z=evolve_z))
                              for z, omega in zip(z_res_ary, omega_0)])

        return z_res_ary, 1., P_ary

    def B_CMB(self, omega, T):
        """ CMB spectral intensity at frequency `omega` (in natural units) for temperature `T` (in Kelvin)
        """
        T_N = (c.k_B * T * u.Kelvin).to(u.eV).value * eV
        return omega**3 / (2 * np.pi**2) * (np.exp(omega / T_N) - 1)**-1
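
# Hypothetical usage sketch (added): `eV` is the natural-unit constant already
# used throughout this module; the numerical values below are illustrative only.
tp = TransitionProbabilities()
omega_0 = np.geomspace(1e-6, 1e-3, 50) * eV   # present-day photon energies
z_res_ary, _, P_ary = tp.P_tot(omega_0=omega_0, eps=1e-7, m_A=1e-13 * eV,
                               sum_probs=True)
print(P_ary.max())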
Example #30
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """
    
    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 l_max_scalar=3000,  # assumed default (not defined in the original snippet)
                 Tcmb=2.7255,        # CMB temperature in K, used below for muK^2 units
                 **kwargs):


        self.model.set(**kwargs)
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #31
def make_LSS_data(z, cosmo_dict, outfile=None):
    """
    Args:
        z (float): redshift
        cosmo_dict (dictionary): the CLASS cosmology
        outfile (string): file to save to
            
    """
    if outfile is None:
        outfile = "LSS_dict"

    LSS_dict = {}

    h = cosmo_dict["h"]
    Omega_b = cosmo_dict["Omega_b"]
    Omega_m = cosmo_dict["Omega_cdm"] + cosmo_dict["Omega_b"]
    n_s = cosmo_dict["n_s"]
    cosmo = Class()
    cosmo.set(cosmo_dict)
    cosmo.compute()
    sigma8 = cosmo.sigma8()
    print("sigma8 is:", sigma8)

    k = np.logspace(-5, 3, base=10, num=4000)  #1/Mpc; comoving
    kh = k / h  #h/Mpc; comoving
    r = np.logspace(-2, 3, num=1000)  #Mpc/h comoving
    M = np.logspace(12, 16.3, 1000)  #Msun/h

    z = np.asarray(z)
    for zi in z:
        P_nl = np.array([cosmo.pk(ki, zi) for ki in k]) * h**3
        P_lin = np.array([cosmo.pk_lin(ki, zi) for ki in k]) * h**3

        xi_nl = ct.xi.xi_mm_at_r(r, kh, P_nl)
        xi_lin = ct.xi.xi_mm_at_r(r, kh, P_lin)

        c = np.array([
            conc.concentration_at_M(Mi,
                                    kh,
                                    P_lin,
                                    n_s,
                                    Omega_b,
                                    Omega_m,
                                    h,
                                    Mass_type="mean") for Mi in M
        ])
        bias = ct.bias.bias_at_M(M, kh, P_lin, Omega_m)

        args_at_z = {
            "k": kh,
            "P_lin": P_lin,
            "P_nl": P_nl,
            "r": r,
            "xi_lin": xi_lin,
            "xi_nl": xi_nl,
            "M": M,
            "concentration": c,
            "bias": bias,
            "h": h,
            "Omega_m": Omega_m,
            "n_s": n_s,
            "sigma8": sigma8,
            "Omega_b": Omega_b
        }
        LSS_dict["args_at_{z:.3f}".format(z=zi)] = args_at_z

    #np.save(outfile, LSS_dict)
    pickle.dump(LSS_dict, open(outfile + ".p", "wb"))
    print("Saved {0}".format(outfile))
    return
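
# Hypothetical call (added): a minimal CLASS parameter dictionary; the key names
# follow standard CLASS input syntax and the values are illustrative only.
example_cosmo = {
    'h': 0.67, 'Omega_b': 0.049, 'Omega_cdm': 0.264, 'n_s': 0.965,
    'A_s': 2.1e-9, 'output': 'mPk', 'P_k_max_1/Mpc': 1000., 'z_max_pk': 1.0,
    'non linear': 'halofit',
}
make_LSS_data(z=[0.25, 0.5], cosmo_dict=example_cosmo, outfile="LSS_dict_example")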
Example #32
import healpy as hp
from classy import Class
import numpy as np

cosmo = Class()
NSIDE = 512
L_MAX_SCALARS = 1500
Npix = 12 * NSIDE**2

LENSING = 'yes'
OUTPUT_CLASS = 'tCl pCl lCl'

COSMO_PARAMS_NAMES = [
    "n_s", "omega_b", "omega_cdm", "100*theta_s", "ln10^{10}A_s", "tau_reio"
]
COSMO_PARAMS_MEANS = [0.9665, 0.02242, 0.11933, 1.04101, 3.047, 0.0561]
COSMO_PARAMS_SIGMA = [0.0038, 0.00014, 0.00091, 0.00029, 0.014, 0.0071]


def proposal(old_theta):
    return old_theta + np.dot(
        np.diag(COSMO_PARAMS_SIGMA) / 500,
        np.random.normal(0, 1, size=len(COSMO_PARAMS_MEANS)))


def sample_power_spectrum(cosmo_params):
    params = {
        'output': OUTPUT_CLASS,
        'l_max_scalars': L_MAX_SCALARS,
        'lensing': LENSING
    }
Example #33
    'adptative_stepsize': 100,
    'scf_tuning_index': 0,
    'do_shooting': 'yes',
    'do_shooting_scf': 'yes',
    # 'back_integration_stepsize': 1e-4,
    'use_big_theta_scf': 'yes',
    'scf_has_perturbations': 'yes',
    'attractor_ic_scf': 'no'
}

###############
#
# call CLASS
#
###############
M = Class()
M.set(common_settings)
M.compute()
#
# define conformal time sampling array
#
times = M.get_current_derived_parameters(['tau_rec', 'conformal_age'])
tau_rec = times['tau_rec']
tau_0 = times['conformal_age']
tau1 = np.logspace(math.log10(tau_ini), math.log10(tau_rec), tau_num_early)
tau2 = np.logspace(math.log10(tau_rec), math.log10(tau_0), tau_num_late)[1:]
tau2[-1] *= 0.999  # this tiny shift avoids interpolation errors
tau = np.concatenate((tau1, tau2))
tau_num = len(tau)
#
# use table of background and thermodynamics quantities to define some functions
Example #34
	    'selection_'+ic+'_file': fname, # File for the N(z) histogram of the i-th sample
	})

if False: # In case you want to plot the N(z)s
	fig, axs = plt.subplots(1, 2, figsize=(14, 5))
	finer_z_grid = np.linspace(0, 2, num=2000)
	for i, nz in enumerate(redshiftdistributions):
		ic = str(i+1)
		nz_grid = nz.eval(z_grid)
		finer_nz_grid = nz.eval(finer_z_grid)
		axs[i].plot(finer_z_grid, finer_nz_grid, label='Gaussian Mixture')
		axs[i].plot(z_grid, nz_grid, label='Histogram-ized', ls='steps')
	plt.show()

# Now run Class!
cosmo = Class()
# Scenario 1
cosmo.set(dict(list(mainparams.items()) + list(scenario1.items())))
cosmo.compute()
cl1 = cosmo.density_cl(mainparams['l_max_lss'])
cosmo.struct_cleanup()
cosmo.empty()
# Scenario 2
cosmo.set(dict(list(mainparams.items()) + list(scenario2.items())))
cosmo.compute()
cl2 = cosmo.density_cl(mainparams['l_max_lss'])
cosmo.struct_cleanup()
cosmo.empty()

# The Cls should be very close if the histogram is binned finely
nbins = len(redshiftdistributions)
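
# Follow-up sketch (added): compare the two runs. The layout assumed here is the
# usual classy density_cl output, with cl['dd'] holding one spectrum per bin pair.
npairs = nbins * (nbins + 1) // 2
for idx in range(npairs):
    rel = np.max(np.abs(cl1['dd'][idx][2:] / cl2['dd'][idx][2:] - 1.))
    print("bin pair %d: max relative difference %.2e" % (idx, rel))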
Example #35
    'omega_cdm': 0.12038,
    'A_s': 2.215e-9,
    'n_s': 0.9619,
    'tau_reio': 0.0925,
    # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
    'YHe': 0.246,
    # other options and settings
    'compute damping scale':
    'yes',  # needed to output the time of damping scale crossing
    'gauge': 'newtonian'
}
##############
#
# call CLASS
#
M = Class()
M.set(common_settings)
M.compute()
#
# load perturbations
#
all_k = M.get_perturbations()  # this potentially contains scalars/tensors and all k values
print(all_k['scalar'][0].keys())
#
one_k = all_k['scalar'][
    0]  # this contains only the scalar perturbations for the requested k values
#
tau = one_k['tau [Mpc]']
Theta0 = 0.25 * one_k['delta_g']
phi = one_k['phi']
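# (the snippet stops here; a minimal continuation sketch, assuming matplotlib)
import matplotlib.pyplot as plt
plt.semilogx(tau, Theta0 + phi)   # effective temperature of the acoustic oscillations
plt.xlabel(r'$\tau \,[\mathrm{Mpc}]$')
plt.ylabel(r'$\Theta_0 + \phi$')
plt.show()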
Example #36
class TwentyOne(LymanAlpha):
    def __init__(self,
                 cosmo=None,
                 z_reio=7.82,
                 z_min=10.0,
                 z_max=150.0,
                 f_star_L=0.5,
                 f_star_X=0.004,
                 T_vir_cut=1e4 * Kelv,
                 use_hirata_fits=True,
                 sed_X="PL",
                 sed_X_kwargs={},
                 hmf_kwargs={
                     "mdef": "vir",
                     "model": "despali16"
                 }):
        """ Class to calculate Lyman-alpha heating

            :param cosmo: The cosmology, specified as an astropy.cosmology.FlatLambdaCDM instance
            :param z_reio: Redshift at reionization, passed to CLASS
            :param z_min: Minimum redshift (for interpolation tables)
            :param z_max: Maximum redshift (for interpolation tables)
            :param f_star_L: Efficiency of star formation, passed to Ly-A/X-ray classes
            :param f_star_X: Efficiency of star formation, X-ray, passed to Ly-A/X-ray classes
            :param T_vir_cut: Minimum virial temperature of star-forming halos, passed to Ly-A/X-ray classes
            :param use_hirata_fits: Whether to use fitting functions from Hirata for S_alpha and T_c
            :param sed_X: X-ray luminosity, "PL" or "Hirata"
            :param sed_X_kwargs: Parameters for X-ray luminosity
            :param hmf_kwargs: Halo mass function settings, passed to the parent class
        """

        LymanAlpha.__init__(self,
                            cosmo=cosmo,
                            z_min=z_min,
                            z_max=z_max,
                            f_star_L=f_star_L,
                            f_star_X=f_star_X,
                            T_vir_cut=T_vir_cut,
                            sed_X=sed_X,
                            sed_X_kwargs=sed_X_kwargs,
                            hmf_kwargs=hmf_kwargs)

        self.z_reio = z_reio  # Redshift at reionization
        self.use_hirata_fits = use_hirata_fits  # Whether to use fitting functions from Hirata for S_alpha and T_c

        self.data_path = str(Path(__file__).parent / "../data/")

        self.load_constants()  # Set class-specific constants
        self.load_interpolations()  # Load interpolation tables
        self.initialize_class_inst()  # Initialize CLASS instance

    def load_constants(self):
        """ Load class-specific constants
        """
        self.nu_Lya = 2466 * 1e12 / Sec  # Ly-A absorption frequency, originally in THz
        self.gamma = 50 * 1e6 / Sec  # HWHM of the Lyman-alpha resonance, after Eq. (11) of astro-ph/0507102, originally in MHz
        self.T_21 = 68.2e-3  # 21-cm temperature, in K
        self.nu_21 = 1420405751.7667 / Sec  # Frequency of 21-cm transition
        self.A_21 = 2.86e-15 / Sec  # Spontaneous emission (Einstein-A) coefficient of 21-cm transition, after Eq. (3) of 0802.2102
        self.lambda_Lya = 121.567e-9 * Meter  # Ly-A absorption wavelength
        self.sigma_T = 0.66524587158 * barn  # Thomson scattering cross-section
        self.EHth = 13.6 * eV  # Hydrogen ground state energy

    def initialize_class_inst(self):
        """ Get electron ionization fraction from CLASS
        """
        class_parameters = {
            "H0": self.cosmo.H0.value,
            "Omega_b": self.cosmo.Ob0,
            "N_ur": self.cosmo.Neff,
            "Omega_cdm": self.cosmo.Odm0,
            "YHe": self.Y_p,
            "z_reio": self.z_reio
        }

        self.CLASS_inst = Class()
        self.CLASS_inst.set(class_parameters)
        self.CLASS_inst.compute()

    def x_e(self, z):
        """ Electron ionization fraction, from CLASS instance
        """
        return self.CLASS_inst.ionization_fraction(z)

    def T_b(self, z):
        """ Baryon temperature at given redshift, from CLASS instance
        """
        return self.CLASS_inst.baryon_temperature(z)

    def load_interpolations(self):
        """ Load interpolation tables
        """

        ## Load table from https://github.com/ntveem/lyaheating
        heffs = np.load(self.data_path + "/heffs.npy")
        # heffs[:, :, :, 1, 0][np.where(heffs[:, :, :, 1, 0] < 0)] = 1e-15

        # Argument arrays for T_k, T_s,...
        l10_t_ary = np.linspace(np.log10(0.1), np.log10(100.0), num=175)
        # ... and tau_GP (Gunn-Peterson optical depth)
        l10_tau_gp_ary = np.linspace(4.0, 7.0)

        # Net energy loss efficiency, defined in Eq. (37) of 1804.02406
        self.E_c_interp = RegularGridInterpolator(
            points=[l10_t_ary, l10_t_ary, l10_tau_gp_ary],
            values=((heffs[:, :, :, 0, 0])),
            bounds_error=False,
            fill_value=None)
        self.E_i_interp = RegularGridInterpolator(
            points=[l10_t_ary, l10_t_ary, l10_tau_gp_ary],
            values=((heffs[:, :, :, 1, 0])),
            bounds_error=False,
            fill_value=None)

        # Energy loss to spins, defined in Eq. (32) of astro-ph/0507102
        self.S_alpha_c_interp = RegularGridInterpolator(
            points=[l10_t_ary, l10_t_ary, l10_tau_gp_ary],
            values=(np.log10(heffs[:, :, :, 0, 2])),
            bounds_error=False,
            fill_value=None)
        self.S_alpha_i_interp = RegularGridInterpolator(
            points=[l10_t_ary, l10_t_ary, l10_tau_gp_ary],
            values=(np.log10(heffs[:, :, :, 1, 2])),
            bounds_error=False,
            fill_value=None)

        # Effective colour temperature, defined in Eq. (32) of astro-ph/0507102
        self.T_c_c_interp = RegularGridInterpolator(
            points=[l10_t_ary, l10_t_ary, l10_tau_gp_ary],
            values=(np.log10(heffs[:, :, :, 0, 3])),
            bounds_error=False,
            fill_value=None)
        self.T_c_i_interp = RegularGridInterpolator(
            points=[l10_t_ary, l10_t_ary, l10_tau_gp_ary],
            values=(np.log10(heffs[:, :, :, 1, 3])),
            bounds_error=False,
            fill_value=None)

        ## Rate coefficients

        # From astro-ph/0608067, Table 1
        kappa_10_eH_ary = np.loadtxt(self.data_path + "/kappa_10_eH_tab.dat")
        # From Zygelman (2005), http://adsabs.harvard.edu/abs/2005ApJ...622.1356Z, Table 2
        kappa_10_HH_ary = np.loadtxt(self.data_path + "/kappa_10_HH_tab.dat")

        self.l10_kappa_10_eH_interp = interp1d(
            np.log10(kappa_10_eH_ary)[:, 0],
            np.log10(kappa_10_eH_ary * Centimeter**3 / Sec)[:, 1],
            bounds_error=False,
            fill_value="extrapolate")
        self.l10_kappa_10_HH_interp = interp1d(
            np.log10(kappa_10_HH_ary)[:, 0],
            np.log10(kappa_10_HH_ary * Centimeter**3 / Sec)[:, 1],
            bounds_error=False,
            fill_value="extrapolate")

    def S_alpha_Hirata(self, Tk, Ts, tauGP):
        """ Hirata fitting functional form for energy loss to spins
        """
        xi = (1e-7 * tauGP)**(1.0 / 3.0) * Tk**(-2.0 / 3.0)
        a = 1.0 - 0.0631789 / Tk + 0.115995 / Tk**2 - 0.401403 / Ts / Tk + 0.336463 / Ts / Tk**2
        b = 1.0 + 2.98394 * xi + 1.53583 * xi**2 + 3.85289 * xi**3

        return a / b

    def T_c_Hirata(self, Tk, Ts):
        """ Hirata fitting functional form for effective colour temperature
        """
        T_c_inv = Tk**(-1.0) + 0.405535 * Tk**(-1.0) * (Ts**(-1.0) - Tk**(-1.0))
        return 1 / T_c_inv

    def T_c_c(self, T_k, T_s, x_e, z):
        """ Effective color temperature from interpolation, continuum
        """
        if T_k <= 100.0:
            return 10**self.T_c_c_interp(
                [np.log10(T_k),
                 np.log10(T_s),
                 np.log10(self.tau_GP(x_e, z))])
        else:
            return T_k / 100 * 10**self.T_c_c_interp([
                np.log10(100.0),
                np.log10(T_s),
                np.log10(self.tau_GP(x_e, z))
            ])  # self.T_b(z)

    def T_c_i(self, T_k, T_s, x_e, z):
        """ Effective color temperature from interpolation, injected
        """
        if T_k <= 100.0:
            return 10**self.T_c_i_interp(
                [np.log10(T_k),
                 np.log10(T_s),
                 np.log10(self.tau_GP(x_e, z))])
        else:
            return T_k / 100 * 10**self.T_c_i_interp([
                np.log10(100.0),
                np.log10(T_s),
                np.log10(self.tau_GP(x_e, z))
            ])  # self.T_b(z)

    def x_HI(self, x_e):
        """ IGM neutral fraction, from electron ionization fraction
        """
        # return np.max(1 - x_e, 0)
        return np.where(x_e <= 1 + self.Y_p / 4 * (1 - self.Y_p),
                        1 - x_e * (1 - self.Y_p / (4 - 3 * self.Y_p)), 0)

    def T_CMB(self, z):
        """ CMB temperature, in K
        """
        return self.T_CMB_0 * (1 + z)

    def tau_GP(self, x_e, z):
        """ Gunn-Peterson optical depth, Eq. (35) of astro-ph/0507102 
        """
        H = self.cosmo.H(z).value * Kmps / Mpc
        return (3 * self.n_H(z) * self.x_HI(x_e) *
                self.gamma) / (2 * H * self.nu_Lya**3)

    def tau_21(self, T_s, x_e, z):
        """ 21-cm optical depth, Eq. (2) of 1804.02406
        """
        H = self.cosmo.H(z).value * Kmps / Mpc
        return 3 / (32 * np.pi) * (self.n_H(z) * self.x_HI(x_e) * self.A_21
                                   ) / (self.nu_21**3 * H) * self.T_21 / T_s

    def x_CMB(self, T_s, x_e, z):
        """ Spin-flip rate due to CMB heating, Eq. (14) of 1804.02406
        """
        return 1 / self.tau_21(T_s, x_e,
                               z) * (1 - np.exp(-self.tau_21(T_s, x_e, z)))

    def x_c(self, T_k, T_gamma, x_e, z):
        """ Collisional coupling spin-flip rate coefficient, Eq (3) of 0802.2102
        """
        return 4 * self.T_21 / (3 * self.A_21 * T_gamma) * self.n_H(z) * (
            10**self.l10_kappa_10_eH_interp(np.log10(T_k)) * x_e +
            10**self.l10_kappa_10_HH_interp(np.log10(T_k)))

    def x_alpha_c(self, T_k, T_s, T_gamma, x_e, J_c_o_J_0, z):
        """ Wouthuysen-Field spin-flip rate coefficient, continuum, Eq. (11) of astro-ph/0507102 
        """
        if self.use_hirata_fits:
            S_alpha_c = self.S_alpha_Hirata(T_k, T_s, self.tau_GP(x_e, z))
        else:
            S_alpha_c = 10**self.S_alpha_c_interp(
                np.log10([T_k, T_s, self.tau_GP(x_e, z)]))
        return 8 * np.pi * self.lambda_Lya**2 * self.gamma * self.T_21 / (
            9 * self.A_21 * T_gamma) * S_alpha_c * J_c_o_J_0 * self.J_0(z)

    def x_alpha_i(self, T_k, T_s, T_gamma, x_e, J_i_o_J_0, z):
        """ Wouthuysen-Field spin-flip rate coefficient, injected, Eq. (11) of astro-ph/0507102 
        """
        if self.use_hirata_fits:
            S_alpha_i = self.S_alpha_Hirata(T_k, T_s, self.tau_GP(x_e, z))
        else:
            S_alpha_i = 10**self.S_alpha_i_interp(
                np.log10([T_k, T_s, self.tau_GP(x_e, z)]))
        return 8 * np.pi * self.lambda_Lya**2 * self.gamma * self.T_21 / (
            9 * self.A_21 * T_gamma) * S_alpha_i * J_i_o_J_0 * self.J_0(z)

    def T_s_inv(self, T_s, T_k, T_gamma, x_e, J, z):
        """ Spin temperature (inverse), e.g. Eq (13) of 1804.02406
        """

        x_CMB = self.x_CMB(T_s, x_e, z)
        x_alpha_c = self.x_alpha_c(T_k, T_s, T_gamma, x_e, J[0], z)
        x_alpha_i = self.x_alpha_i(T_k, T_s, T_gamma, x_e, J[1], z)
        x_c = self.x_c(T_k, T_gamma, x_e, z)

        if self.use_hirata_fits:
            T_c_c = T_c_i = self.T_c_Hirata(T_k, T_s)
        else:
            T_c_c = self.T_c_c(T_k, T_s, x_e, z)
            T_c_i = self.T_c_i(T_k, T_s, x_e, z)

        return (x_CMB * T_gamma**-1 + x_alpha_c * T_c_c**-1 +
                x_alpha_i * T_c_i**-1 + x_c * T_k**-1) / (x_CMB + x_alpha_c +
                                                          x_alpha_i + x_c)

    def T_s_solve(self, T_k, T_gamma, x_e, J, z):
        """ Solve for spin temperature
        """
        T_s = (root(
            lambda T_s: self.T_s_inv(T_s[0], T_k, T_gamma, x_e, J, z) - T_s**
            -1, np.min([T_k, T_gamma])).x)[0]
        return T_s

    def E_CMB(self, T_s, T_gamma, T_k, x_e, z):
        """ Heating efficiency due to CMB, from Eq. (17) of 1804.02406
        """
        H = self.cosmo.H(z).value * Kmps / Mpc
        return self.x_HI(x_e) * self.A_21 / (2 * H) * self.x_CMB(
            T_s, x_e, z) * (T_gamma / T_s - 1) * self.T_21 / T_k

    def E_Compton(self, T_k, x_e, z):
        """ Compton heating efficiency, from Eq. (22) of 1312.4948 (TODO: but is it)
        """
        H = self.cosmo.H(z).value * Kmps / Mpc
        a_r = np.pi**2 / 15.0
        return 8 * self.sigma_T * a_r * (self.T_CMB(z) * Kelv)**4 * x_e / (
            3 * m_e * H) * (self.T_CMB(z) / T_k - 1)

    def dT_k_dz(self, T_s, T_k, T_gamma, x_e, J, z):
        """ Kinetic temperature evolution, from Eq (18) of 1804.02406
        """

        E_c = self.E_c_interp(np.log10([T_k, T_s, self.tau_GP(x_e, z)]))
        E_i = self.E_i_interp(np.log10([T_k, T_s, self.tau_GP(x_e, z)]))

        dT_k_dz = 1 / (1 + z) * (
            2 * T_k - 1 / (1 + self.f_He + x_e) *
            (E_c * J[0] + E_i * J[1] + self.E_CMB(T_s, T_gamma, T_k, x_e, z) +
             self.E_Compton(T_k, x_e, z)) * T_k)

        return dT_k_dz - 1 / (1 + z) * self.heat_coeff(z)

    def alpha_A(self, T):
        """ Case-A recombination coefficient, from Pequignot et al (1991), Eq. (1) and Table 1
        """

        zi = 1.0
        a = 5.596
        b = -0.6038
        c = 0.3436
        d = 0.4479
        t = 1e-4 * T / zi**2

        return zi * (a * t**b) / (1 + c * t**d) * 1e-13 * Centimeter**3 / Sec

    def alpha_B(self, T):
        """ Case-B recombination coefficient, from `DarkHistory`
        """
        return alpha_recomb(
            (k_B * T * Kelv) / eV, species="HI") * Centimeter**3 / Sec

    def rec_rate(self, z, x_e, T_k, case="B"):
        """ Recombination rate (Eq. 29 of 1312.4948)
        """
        Gamma_rec = -peebles_C(1 - self.x_HI(x_e), 1 +
                               z) * self.alpha_B(T_k) * x_e**2 * self.n_H(z)
        return self.dz_dt(z)**-1 * Gamma_rec
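# A hedged usage sketch (requires the package's LymanAlpha dependencies, its
# unit constants, and an astropy cosmology instance; the numbers are only
# illustrative, not taken from the original code):
# from astropy.cosmology import Planck18
# tw = TwentyOne(cosmo=Planck18, z_reio=7.82)
# z = 30.0
# x_e = tw.x_e(z)
# T_s = tw.T_s_solve(T_k=10.0, T_gamma=tw.T_CMB(z), x_e=x_e, J=[1.0, 1.0], z=z)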
Example #37
 #
 # deal with colors and legends
 #
 if i == 0:
     var_color = 'k'
     var_alpha = 1.
     legarray.append(r'ref. $\Lambda CDM$')
 else:
     var_color = 'r'
     var_alpha = 1. * i / (var_num - 1.)
 if i == var_num - 1:
     legarray.append(var_legend)
 #
 # call CLASS
 #
 M = Class()
 M.set(common_settings)
 M.set({var_name: var})
 M.compute()
 #
 # get Cls
 #
 clM = M.lensed_cl(2500)
 ll = clM['ell'][2:]
 clTT = clM['tt'][2:]
 clEE = clM['ee'][2:]
 clPP = clM['pp'][2:]
 #
 # get P(k) for common k values
 #
 pkM = []
Example #38
class classy(_cosmo):

    def initialize(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # If path not given, try using general path to modules
        if not self.path and self.path_install:
            self.path = os.path.join(
                self.path_install, "code", classy_repo_rename)
        if self.path:
            self.log.info("Importing *local* classy from " + self.path)
            classy_build_path = os.path.join(self.path, "python", "build")
            post = next(d for d in os.listdir(classy_build_path) if d.startswith("lib."))
            classy_build_path = os.path.join(classy_build_path, post)
            if not os.path.exists(classy_build_path):
                # If path was given as an install path, try to install global one anyway
                if self.path_install:
                    self.log.info("Importing *global* CLASS (because not installed).")
                else:
                    self.log.error("Either CLASS is not in the given folder, "
                                   "'%s', or you have not compiled it.", self.path)
                    raise HandledException
            else:
                # Inserting the previously found path into the list of import folders
                sys.path.insert(0, classy_build_path)
        else:
            self.log.info("Importing *global* CLASS.")
        try:
            from classy import Class, CosmoSevereError, CosmoComputationError
        except ImportError:
            self.log.error(
                "Couldn't find the CLASS python interface. "
                "Make sure that you have compiled it, and that you either\n"
                " (a) specify a path (you didn't) or\n"
                " (b) install the Python interface globally with\n"
                "     '/path/to/class/python/python setup.py install --user'")
            raise HandledException
        self.classy = Class()
        # Propagate errors up
        global CosmoComputationError, CosmoSevereError
        # Generate states, to avoid recomputing
        self.n_states = 3
        self.states = [{"params": None, "derived": None, "derived_extra": None,
                        "last": 0} for i in range(self.n_states)]
        # Dict of named tuples to collect requirements and computation methods
        self.collectors = {}
        # Additional input parameters to pass to CLASS
        self.extra_args = self.extra_args or {}
        # Add general CLASS stuff
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (
                self.extra_args["sBBN file"].format(classy=self.path))
        # Set aliases
        self.planck_to_classy = self.renames
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []

    def current_state(self):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        return self.states[lasts.index(max(lasts))]

    def needs(self, **requirements):
        # Computed quantities required by the likelihood
        super(classy, self).needs(**requirements)
        for k, v in self._needs.items():
            # Products and other computations
            if k == "Cl":
                if any([("t" in cl.lower()) for cl in v]):
                    self.extra_args["output"] += " tCl"
                if any([(("e" in cl.lower()) or ("b" in cl.lower())) for cl in v]):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                # For l_max_scalars, remember previous entries.
                self.extra_args["l_max_scalars"] = max(v.values())
                self.collectors[k] = collector(
                    method="lensed_cl", kwargs={"lmax": self.extra_args["l_max_scalars"]})
            elif k == "H":
                self.collectors[k] = collector(
                    method="Hubble",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
                self.H_units_conv_factor = {"1/Mpc": 1, "km/s/Mpc": _c_km_s}
            elif k == "angular_diameter_distance":
                self.collectors[k] = collector(
                    method="angular_distance",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k == "comoving_radial_distance":
                self.collectors[k] = collector(
                    method="z_of_r",
                    args_names=["z"],
                    args=[np.atleast_1d(v["z"])])
            elif k == "Pk_interpolator":
                self.extra_args["output"] += " mPk"
                self.extra_args["P_k_max_h/Mpc"] = max(
                    v.pop("k_max"), self.extra_args.get("P_k_max_h/Mpc", 0))
                self.add_z_for_matter_power(v.pop("z"))
                # Use halofit by default if non-linear requested but no code specified
                if v.get("nonlinear", False) and "non linear" not in self.extra_args:
                    self.extra_args["non linear"] = non_linear_default_code
                for pair in v.pop("vars_pairs", [["delta_tot", "delta_tot"]]):
                    if any([x != "delta_tot" for x in pair]):
                        self.log.error("NotImplemented in CLASS: %r", pair)
                        raise HandledException
                    self._Pk_interpolator_kwargs = {
                        "logk": True, "extrap_kmax": v.pop("extrap_kmax", None)}
                    name = "Pk_interpolator_%s_%s" % (pair[0], pair[1])
                    self.collectors[name] = collector(
                        method="get_pk_and_k_and_z",
                        kwargs=v,
                        post=(lambda P, k, z: PowerSpectrumInterpolator(
                            z, k, P.T, **self._Pk_interpolator_kwargs)))
            elif v is None:
                k_translated = self.translate_param(k, force=True)
                if k_translated not in self.derived_extra:
                    self.derived_extra += [k_translated]
            else:
                self.log.error("Requested product not known: %r", {k: v})
                raise HandledException
        # Derived parameters (if some need some additional computations)
        if any([("sigma8" in s) for s in self.output_params or requirements]):
            self.extra_args["output"] += " mPk"
            self.extra_args["P_k_max_h/Mpc"] = (
                max(1, self.extra_args.get("P_k_max_h/Mpc", 0)))
        # Adding tensor modes if requested
        if self.extra_args.get("r") or "r" in self.input_params:
            self.extra_args["modes"] = "s,t"
        # If B spectrum with l>50, or lensing, recommend using Halofit
        cls = self._needs.get("Cl", {})
        if (((any([("b" in cl.lower()) for cl in cls]) and
              max([cls[cl] for cl in cls if "b" in cl.lower()]) > 50) or
             any([("p" in cl.lower()) for cl in cls]) and
             not self.extra_args.get("non linear"))):
            self.log.warning("Requesting BB for ell>50 or lensing Cl's: "
                             "using a non-linear code is recommended (and you are not "
                             "using any). To activate it, set "
                             "'non_linear: halofit|hmcode|...' in classy's 'extra_args'.")
        # Cleanup of products string
        self.extra_args["output"] = " ".join(set(self.extra_args["output"].split()))
        # If no output requested, remove arguments that produce an error
        # (e.g. complaints if halofit requested but no Cl's computed.)
        # Needed for facilitating post-processing
        if not self.extra_args["output"]:
            for k in ["non linear"]:
                if k in self.extra_args:
                    self.log.info("Ignoring {%s: %r}, since no products requested.",
                                  k, self.extra_args[k])
                    self.extra_args.pop(k)
        # Finally, check that there are no repeated parameters between input and extra
        if set(self.input_params).intersection(set(self.extra_args)):
            self.log.error(
                "The following parameters appear both as input parameters and as CLASS "
                "extra arguments: %s. Please, remove one of the definitions of each.",
                list(set(self.input_params).intersection(set(self.extra_args))))
            raise HandledException

    def add_z_for_matter_power(self, z):
        if not hasattr(self, "z_for_matter_power"):
            self.z_for_matter_power = np.empty((0))
        self.z_for_matter_power = np.flip(np.sort(np.unique(np.concatenate(
            [self.z_for_matter_power, np.atleast_1d(z)]))), axis=0)
        self.extra_args["z_pk"] = " ".join(["%g" % zi for zi in self.z_for_matter_power])

    def translate_param(self, p, force=False):
        # "force=True" is used when communicating with likelihoods, which speak "planck"
        if self.use_planck_names or force:
            return self.planck_to_classy.get(p, p)
        return p

    def set(self, params_values_dict, i_state):
        # Store them, to use them later to identify the state
        self.states[i_state]["params"] = deepcopy(params_values_dict)
        # Prepare parameters to be passed: this-iteration + extra
        args = {self.translate_param(p): v for p, v in params_values_dict.items()}
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.struct_cleanup()
        self.classy.set(**args)

    def compute(self, _derived=None, cached=True, **params_values_dict):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        try:
            if not cached:
                raise StopIteration
            # are the parameter values there already?
            i_state = next(i for i in range(self.n_states)
                           if self.states[i]["params"] == params_values_dict)
            # has any new product been requested?
            for product in self.collectors:
                next(k for k in self.states[i_state] if k == product)
            reused_state = True
            # Get (pre-computed) derived parameters
            if _derived == {}:
                _derived.update(self.states[i_state]["derived"])
            self.log.debug("Re-using computed results (state %d)", i_state)
        except StopIteration:
            reused_state = False
            # update the (first) oldest one and compute
            i_state = lasts.index(min(lasts))
            self.log.debug("Computing (state %d)", i_state)
            if self.timing:
                a = time()
            # Set parameters
            self.set(params_values_dict, i_state)
            # Compute!
            try:
                self.classy.compute()
            # "Valid" failure of CLASS: parameters too extreme -> log and report
            except CosmoComputationError:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on.")
                return 0
            # CLASS not correctly initialized, or input parameters not correct
            except CosmoSevereError:
                self.log.error("Serious error setting parameters or computing results. "
                               "The parameters passed were %r and %r. "
                               "See original CLASS's error traceback below.\n",
                               self.states[i_state]["params"], self.extra_args)
                raise  # No HandledException, so that CLASS traceback gets printed
            # Gather products
            for product, collector in self.collectors.items():
                # Special case: sigma8 needs H0, which cannot be known beforehand:
                if "sigma8" in self.collectors:
                    self.collectors["sigma8"].args[0] = 8 / self.classy.h()
                method = getattr(self.classy, collector.method)
                arg_array = self.collectors[product].arg_array
                if arg_array is None:
                    self.states[i_state][product] = method(
                        *self.collectors[product].args, **self.collectors[product].kwargs)
                elif isinstance(arg_array, Number):
                    self.states[i_state][product] = np.zeros(
                        len(self.collectors[product].args[arg_array]))
                    for i, v in enumerate(self.collectors[product].args[arg_array]):
                        args = (list(self.collectors[product].args[:arg_array]) + [v] +
                                list(self.collectors[product].args[arg_array + 1:]))
                        self.states[i_state][product][i] = method(
                            *args, **self.collectors[product].kwargs)
                elif arg_array in self.collectors[product].kwargs:
                    value = np.atleast_1d(self.collectors[product].kwargs[arg_array])
                    self.states[i_state][product] = np.zeros(value.shape)
                    for i, v in enumerate(value):
                        kwargs = deepcopy(self.collectors[product].kwargs)
                        kwargs[arg_array] = v
                        self.states[i_state][product][i] = method(
                            *self.collectors[product].args, **kwargs)
                if collector.post:
                    self.states[i_state][product] = collector.post(
                        *self.states[i_state][product])
            # Prepare derived parameters
            d, d_extra = self._get_derived_all(derived_requested=(_derived == {}))
            if _derived == {}:
                _derived.update(d)
            self.states[i_state]["derived"] = odict(
                [[p, (_derived or {}).get(p)] for p in self.output_params])
            # Prepare necessary extra derived parameters
            self.states[i_state]["derived_extra"] = deepcopy(d_extra)
            if self.timing:
                self.n += 1
                self.time_avg = (time() - a + self.time_avg * (self.n - 1)) / self.n
                self.log.debug("Average running time: %g seconds", self.time_avg)
        # make this one the current one by decreasing the antiquity of the rest
        for i in range(self.n_states):
            self.states[i]["last"] -= max(lasts)
        self.states[i_state]["last"] = 1
        return 1 if reused_state else 2

    def _get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``compute`` method).

        Parameter names are returned in CLASS nomenclature.

        To get a parameter *from a likelihood* use `get_param` instead.
        """
        # Put all parameters in CLASS nomenclature (self.derived_extra already is)
        requested = [self.translate_param(p) for p in (
            self.output_params if derived_requested else [])]
        requested_and_extra = {
            p: None for p in set(requested).union(set(self.derived_extra))}
        # Parameters with their own getters
        if "rs_drag" in requested_and_extra:
            requested_and_extra["rs_drag"] = self.classy.rs_drag()
        elif "Omega_nu" in requested_and_extra:
            requested_and_extra["Omega_nu"] = self.classy.Omega_nu
        # Get the rest using the general derived param getter
        # No need for error control: classy.get_current_derived_parameters is passed
        # every derived parameter not excluded before, and would raise an error
        # indicating which parameters are not recognized
        requested_and_extra.update(
            self.classy.get_current_derived_parameters(
                [p for p, v in requested_and_extra.items() if v is None]))
        # Separate the parameters before returning
        # Remember: self.output_params is in sampler nomenclature,
        # but self.derived_extra is in CLASS
        derived = {
            p: requested_and_extra[self.translate_param(p)] for p in self.output_params}
        derived_extra = {p: requested_and_extra[p] for p in self.derived_extra}
        return derived, derived_extra

    def get_param(self, p):
        current_state = self.current_state()
        for pool in ["params", "derived", "derived_extra"]:
            value = deepcopy(
                current_state[pool].get(self.translate_param(p, force=True), None))
            if value is not None:
                return value
        self.log.error("Parameter not known: '%s'", p)
        raise HandledException

    def get_cl(self, ell_factor=False, units="muK2"):
        current_state = self.current_state()
        try:
            cls = deepcopy(current_state["Cl"])
        except:
            self.log.error(
                "No Cl's were computed. Are you sure that you have requested them?")
            raise HandledException
        # unit conversion and ell_factor
        ell_factor = ((cls["ell"] + 1) * cls["ell"] / (2 * np.pi))[2:] if ell_factor else 1
        units_factors = {"1": 1,
                         "muK2": _T_CMB_K * 1.e6,
                         "K2": _T_CMB_K}
        try:
            units_factor = units_factors[units]
        except KeyError:
            self.log.error("Units '%s' not recognized. Use one of %s.",
                           units, list(units_factors))
            raise HandledException
        for cl in cls:
            if cl not in ['pp', 'ell']:
                cls[cl][2:] *= units_factor ** 2 * ell_factor
        if "pp" in cls and ell_factor is not 1:
            cls['pp'][2:] *= ell_factor ** 2 * (2 * np.pi)
        return cls

    def _get_z_dependent(self, quantity, z):
        try:
            z_name = next(k for k in ["redshifts", "z"]
                          if k in self.collectors[quantity].kwargs)
            computed_redshifts = self.collectors[quantity].kwargs[z_name]
        except StopIteration:
            computed_redshifts = self.collectors[quantity].args[
                self.collectors[quantity].args_names.index("z")]
        i_kwarg_z = np.concatenate(
            [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)])
        values = np.array(deepcopy(self.current_state()[quantity]))
        if quantity == "comoving_radial_distance":
            values = values[0]
        return values[i_kwarg_z]

    def get_H(self, z, units="km/s/Mpc"):
        try:
            return self._get_z_dependent("H", z) * self.H_units_conv_factor[units]
        except KeyError:
            self.log.error("Units not known for H: '%s'. Try instead one of %r.",
                           units, list(self.H_units_conv_factor))
            raise HandledException

    def get_angular_diameter_distance(self, z):
        return self._get_z_dependent("angular_diameter_distance", z)

    def get_comoving_radial_distance(self, z):
        return self._get_z_dependent("comoving_radial_distance", z)

    def get_Pk_interpolator(self):
        current_state = self.current_state()
        prefix = "Pk_interpolator_"
        return {k[len(prefix):]: deepcopy(v)
                for k, v in current_state.items() if k.startswith(prefix)}

    def close(self):
        self.classy.struct_cleanup()
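# A hedged sketch of the call sequence this wrapper is driven through (it is
# normally instantiated by cobaya from an input .yaml, not by hand):
# theory.needs(Cl={"tt": 2500, "ee": 2500})              # declare required products
# theory.compute(omega_b=0.02237, omega_cdm=0.1200,
#                H0=67.4, tau_reio=0.054, _derived={})   # one sampled point
# cls = theory.get_cl(ell_factor=True, units="muK2")     # lensed spectra, l(l+1)C_l/2pi in muK^2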
Example #39
z_ary = np.loadtxt(
    '/Users/andreacaputo/Desktop/Phd/lim-master 4/zary_forbiasav.dat')
bias_average_ary = np.loadtxt(
    '/Users/andreacaputo/Desktop/Phd/lim-master 4/biasav_ary.dat')

# let's interpolate
toint = interp1d(z_ary, bias_average_ary)


def biasav(z):
    return 1. * toint(z)


# create instance of the class "Class"
LambdaCDM = Class()
# pass input parameters
m1 = 0.06 / 3
m2 = 0  #0.06/3
m3 = 0  #0.06/3

LambdaCDM.set({'N_ncdm': 3})
LambdaCDM.set({'m_ncdm': str(m1) + ',' + str(m2) + ',' + str(m3)})
LambdaCDM.set({
    'omega_b': 0.022032,
    'omega_cdm': 0.12038,
    'h': 0.67556,
    'A_s': 2.215e-9,
    'n_s': 0.9619,
    'tau_reio': 0.0925
})
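# (the snippet is truncated here; a minimal hedged continuation computing the
#  matter power spectrum with this neutrino setup)
LambdaCDM.set({'output': 'mPk', 'P_k_max_1/Mpc': 3.0})
LambdaCDM.compute()
kk = np.logspace(-4, np.log10(3), 200)       # k in 1/Mpc
Pk = [LambdaCDM.pk(k, 0.) for k in kk]       # P(k, z=0) in Mpc^3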
Example #40
                  # The next line should be uncommented for higher precision (but significantly slower running)
                  'ncdm_fluid_approximation':3,
                  # You may uncomment this line to get more info on the ncdm sector from Class:
                  'background_verbose':1
                 }

# array of k values in 1/Mpc
kvec = np.logspace(-4,np.log10(3),100)
# array for storing legend
legarray = []

# loop over total mass values
for sum_masses in [0.1, 0.115, 0.13]:
    # normal hierarchy
    [m1, m2, m3] = get_masses(2.45e-3,7.50e-5, sum_masses, 'NH')
    NH = Class()
    NH.set(commonsettings)
    NH.set({'m_ncdm':str(m1)+','+str(m2)+','+str(m3)})
    NH.compute()
    # inverted hierarchy
    [m1, m2, m3] = get_masses(2.45e-3,7.50e-5, sum_masses, 'IH')
    IH = Class()
    IH.set(commonsettings)
    IH.set({'m_ncdm':str(m1)+','+str(m2)+','+str(m3)})
    IH.compute()
    pkNH = []
    pkIH = []
    for k in kvec:
        pkNH.append(NH.pk(k,0.))
        pkIH.append(IH.pk(k,0.))
    NH.struct_cleanup()
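    # (truncated in the original; a hedged continuation finishing the loop by
    #  cleaning up IH and plotting the IH/NH ratio, assuming matplotlib as plt)
    IH.struct_cleanup()
    plt.semilogx(kvec, np.array(pkIH) / np.array(pkNH) - 1.)
    legarray.append(r'$\Sigma m_\nu = %g\,\mathrm{eV}$' % sum_masses)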
Example #41
import numpy as N
from classy import Class


def lookup_Pk(cosmology='planck',nonlinear=0):
    """
    It saves a lookup table of the (non)linear power spectrum generated by CLASS.
    If nonlinear is 0 (default) it generates the linear power spectrum;
    set nonlinear=1 for the halofit nonlinear spectrum.
    You can choose between the following cosmologies:
    - planck
    - wmap
    - ML
    """

    # k in h/Mpc
    k = N.logspace(-4., 3., 3*1024)

    if nonlinear==1:
        hf = 'halofit'
        saveto = 'data_itam/'+cosmology+'_pk.txt'

    else:
        hf = ''
        saveto = 'data_itam/'+cosmology+'_pk_linear.txt'

    if cosmology == 'planck':
        class_params = {
        'non linear': hf,
        'output': ['mPk','vTk'],
        'P_k_max_1/Mpc': 1000.,
        'z_pk': 0.,
        'A_s': 2.3e-9,
        'n_s': 0.96,
        'h': 0.7,
        'omega_b': 0.0225,
        'Omega_cdm': 0.25,
        }
        sig8_0 = 0.8


    elif cosmology == 'wmap':
        class_params = {
        'non linear': hf,
        'output': ['mPk','vTk'],
        'P_k_max_1/Mpc': 1000.,
        'z_pk': 0.,
        'A_s': 2.3e-9,
        'n_s': 0.967,
        'h': 0.704,
        'omega_b': 0.02253,
        'Omega_cdm': 0.226,
        }
        sig8_0 = 0.81


    elif cosmology == 'ML':
        class_params = {
        'non linear': hf,
        'output': ['mPk','vTk'],
        'P_k_max_1/Mpc': 1000.,
        'z_pk': 0.,
        'A_s': 2.3e-9,
        'n_s': 1.,
        'h': 0.73,
        'omega_b': 0.045*0.73**2,
        'Omega_cdm': 0.25-0.045,
        }
        sig8_0 = 0.9

    else:
        raise ValueError("the cosmology you chose does not exist")

    cosmoClass_nl = Class()
    cosmoClass_nl.set(class_params)
    cosmoClass_nl.compute()

    # rescale the normalization of matter power spectrum to have sig8=0.8 today
    sig8 = cosmoClass_nl.sigma8()
    A_s = cosmoClass_nl.pars['A_s']
    cosmoClass_nl.struct_cleanup() # does not clean the input class_params, cosmo.empty() does that
    cosmoClass_nl.set(A_s=A_s*(sig8_0*1./sig8)**2)
    cosmoClass_nl.compute()

    h = cosmoClass_nl.pars['h']
    pk_nl = N.asarray([ cosmoClass_nl.pk(x*h, 0.,)*h**3 for x in k ])

    kpk = N.vstack((k,pk_nl))
        
    N.savetxt(saveto,kpk)
    print('saving', saveto )
    return
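# Usage sketch: tabulate both the linear and the halofit spectra for the
# 'planck' parameter set (this would write data_itam/planck_pk_linear.txt and
# data_itam/planck_pk.txt):
# lookup_Pk('planck', nonlinear=0)
# lookup_Pk('planck', nonlinear=1)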

params = {
	    'output': 'tCl lCl',
	    'l_max_scalars': 2508,
	    'lensing': 'yes',
	    'P_k_ini type': 'external_Pk',
	    'command': 'python /home/andrew/Research/tools/class_public-2.4.3/external_Pk/generate_Pk_cosines.py',
	    'custom1': 0,
	    'custom2': 0,
	    'custom3': 0,
	    'custom4': 0,
	    'custom5': 0}

#Get the unperturbed cls for comparison
cosmo = Class()
cosmo.set(params)
cosmo.compute()
clso=cosmo.lensed_cl(2508)['tt'][30:]
ell = cosmo.lensed_cl(2508)['ell'][30:]

for i in range(len(clso)):
	clso[i]=ell[i]*(ell[i]+1)/(4*np.pi)*((2.726e6)**2)*clso[i]
a=np.zeros(5)
cosmo.struct_cleanup()
cosmo.empty()
dcls=np.zeros([clso.shape[0],5])
h=1e-6
for m in range(5):
	a[m]=h
	# Define your cosmology (what is not specified will be set to CLASS default parameters)
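	# (the snippet stops here; a hedged sketch of the remaining loop body:
	#  perturb the m-th 'custom' parameter by h and finite-difference the
	#  lensed TT spectrum against the unperturbed clso computed above)
	cosmo.set(params)
	cosmo.set({'custom%d' % (m + 1): a[m]})
	cosmo.compute()
	cls_p = cosmo.lensed_cl(2508)['tt'][30:]
	for i in range(len(cls_p)):
		cls_p[i] = ell[i]*(ell[i]+1)/(4*np.pi)*((2.726e6)**2)*cls_p[i]
	dcls[:, m] = (cls_p - clso) / h
	cosmo.struct_cleanup()
	cosmo.empty()
	a[m] = 0.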
Example #43
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    #{cosmoslik name : class name} - This needs to be done even for variables with the same name (because of for loop in self.model.set)!
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'phi0':'custom1',
                    'm6':'custom2',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 phi0,
                 m6,
                 tau,
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 massless_neutrinos=3.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.05,
                 outputs=[],
                 **kwargs):

        d = {self.name_mapping[k]: v for k, v in locals().items()
             if k in self.name_mapping and v is not None}
        d['P_k_ini type']='external_Pk'
        d['modes'] = 's,t'
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       command = '../LSODAtesnors/pk',
                       **d)
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #44
def calculate_power(cosmology, k_min, k_max, z=0, num_k=500, scaled_by_h=True,
                    n_s=0.9619, logA=3.0980):
    """
    Calculate the power spectrum P(k,z) over the range k_min <= k <= k_max.
    """
    try:
        from classy import Class
        cosmo = Class()
    except ImportError:
        raise RuntimeError('power.calculate_power requires classy.')

    class_parameters = get_class_parameters(cosmology)
    class_parameters['output'] = 'mPk'
    if scaled_by_h:
        class_parameters['P_k_max_h/Mpc'] = k_max
    else:
        class_parameters['P_k_max_1/Mpc'] = k_max
    class_parameters['n_s'] = n_s
    class_parameters['ln10^{10}A_s'] = logA
    cosmo.set(class_parameters)
    cosmo.compute()

    if scaled_by_h:
        k_scale = cosmo.h()
        Pk_scale = cosmo.h()**3
    else:
        k_scale = 1.
        Pk_scale = 1.

    result = np.empty((num_k,), dtype=[('k', float), ('Pk', float)])
    result['k'][:] = np.logspace(np.log10(k_min), np.log10(k_max), num_k)
    for i, k in enumerate(result['k']):
        result['Pk'][i] = cosmo.pk(k * k_scale, z) * Pk_scale

    cosmo.struct_cleanup()
    cosmo.empty()

    return result
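# A hedged usage sketch (the cosmology label must be one understood by
# get_class_parameters; 'Planck13' is only an example name):
# result = calculate_power('Planck13', k_min=1e-4, k_max=10., z=0.0)
# plt.loglog(result['k'], result['Pk'])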
Example #46
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk,
    etc.)

    """

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()

        self.verbose = {
            'background_verbose': 1,
            'thermodynamics_verbose': 1,
            'perturbations_verbose': 1,
            'transfer_verbose': 1,
            'primordial_verbose': 1,
            'spectra_verbose': 1,
            'nonlinear_verbose': 1,
            'lensing_verbose': 1,
            'output_verbose': 1}
        self.scenario = {'lensing':'yes'}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        del self.scenario

    @parameterized.expand(
        itertools.product(
            ('LCDM',
             'Mnu',
             'Positive_Omega_k',
             'Negative_Omega_k',
             'Isocurvature_modes', ),
            ({'output': ''}, {'output': 'mPk'}, {'output': 'tCl'},
             {'output': 'tCl pCl lCl'}, {'output': 'mPk tCl lCl', 'P_k_max_h/Mpc':10},
             {'output': 'nCl sCl'}, {'output': 'tCl pCl lCl nCl sCl'}),
            ({'gauge': 'newtonian'}, {'gauge': 'sync'}),
            ({}, {'non linear': 'halofit'})))
    def test_parameters(self, name, scenario, gauge, nonlinear):
        """Create a few instances based on different cosmologies"""
        if name == 'Mnu':
            self.scenario.update({'N_ncdm': 1, 'm_ncdm': 0.06})
        elif name == 'Positive_Omega_k':
            self.scenario.update({'Omega_k': 0.01})
        elif name == 'Negative_Omega_k':
            self.scenario.update({'Omega_k': -0.01})
        elif name == 'Isocurvature_modes':
            self.scenario.update({'ic': 'ad,nid,cdi', 'c_ad_cdi': -0.5})

        self.scenario.update(scenario)
        if scenario != {}:
            self.scenario.update(gauge)
        self.scenario.update(nonlinear)

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test case %s |\n' % name)
        sys.stderr.write('---------------------------------\n')
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set({**self.verbose, **self.scenario})
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_list = ['tCl', 'lCl', 'pCl', 'nCl', 'sCl']

        # Depending on the cases, the compute should fail or not
        should_fail = True
        output = self.scenario['output'].split()
        for elem in output:
            if elem in ['tCl', 'pCl']:
                for elem2 in output:
                    if elem2 == 'lCl':
                        should_fail = False
                        break

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(
            self.cosmo.state,
            "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print('--> Class is ready')
        # Depending
        if 'output' in self.scenario.keys():
            # Positive tests
            output = self.scenario['output']
            for elem in output.split():
                if elem in cl_list:
                    print('--> testing raw_cl function')
                    cl = self.cosmo.raw_cl(100)
                    self.assertIsNotNone(cl, "raw_cl returned nothing")
                    self.assertEqual(
                        np.shape(cl['tt'])[0], 101,
                        "raw_cl returned wrong size")
                if elem == 'mPk':
                    print('--> testing pk function')
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_list for elem in output.split()]):
                print('--> testing absence of any Cl')
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if 'mPk' not in self.scenario['output'].split():
                print('--> testing absence of mPk')
                #args = (0.1, 0)
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

        print('~~~~~~~~ passed ? ')

    @parameterized.expand(
        itertools.product(
            ('massless', 'massive', 'both'),
            ('photons', 'massless', 'exact'),
            ('t', 's, t')))
    def test_tensors(self, scenario, method, modes):
        """Test the new tensor mode implementation"""
        self.scenario = {}
        if scenario == 'massless':
            self.scenario.update({'N_eff': 3.046, 'N_ncdm':0})
        elif scenario == 'massive':
            self.scenario.update(
                {'N_eff': 0, 'N_ncdm': 2, 'm_ncdm': '0.03, 0.04',
                 'deg_ncdm': '2, 1'})
        elif scenario == 'both':
            self.scenario.update(
                {'N_eff': 1.5, 'N_ncdm': 2, 'm_ncdm': '0.03, 0.04',
                 'deg_ncdm': '1, 0.5'})

        self.scenario.update({
            'tensor method': method, 'modes': modes,
            'output': 'tCl, pCl'})
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")
        self.cosmo.set({**self.verbose, **self.scenario})
        self.cosmo.compute()
Example #47
                   'omega_b':0.022032,
                   'omega_cdm':0.12038,
                   'A_s':2.215e-9,
                   'n_s':0.9619,
                   'tau_reio':0.0925,
                   # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
                   'YHe':0.246,
                   # other output and precision parameters
                   'l_max_scalars':5000,
                   'P_k_max_1/Mpc':10.0,
                   'gauge':'newtonian'}
###############
#
# call CLASS a first time just to compute z_rec (will compute transfer functions at default: z=0)
#
M = Class()
M.set(common_settings)
M.compute()
derived = M.get_current_derived_parameters(['z_rec','tau_rec','conformal_age'])
#print(derived.keys())
z_rec = derived['z_rec']
z_rec = int(1000.*z_rec)/1000. # round down to 3 decimal places
M.struct_cleanup()  # clean output
M.empty()           # clean input
#
# call CLASS again (will compute transfer functions at input value z_rec)
#
M = Class()
M.set(common_settings)
M.set({'z_pk':z_rec})
M.compute()
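#
# (the snippet stops here; a hedged continuation fetching the transfer
#  functions at the requested redshift z_rec)
#
one_time = M.get_transfer(z_rec)
k = one_time['k (h/Mpc)']     # wavenumbers in h/Mpc
delta_g = one_time['d_g']     # photon density transfer function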
Example #48
                   # LambdaCDM parameters
                   'h':0.67556,
                   'omega_b':0.022032,
                   'omega_cdm':0.12038,
                   'A_s':2.215e-9,
                   'n_s':0.9619,
                   'tau_reio':0.0925,
                   # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
                   'YHe':0.246,
                   # other output and precision parameters
                   'l_max_scalars':5000}
###############
#
# call CLASS
#
M = Class()
M.set(common_settings)
M.compute()
cl_tot = M.raw_cl(3000)
cl_lensed = M.lensed_cl(3000)
M.struct_cleanup()  # clean output
M.empty()           # clean input
#
M.set(common_settings) # new input
M.set({'temperature contributions':'tsw'})
M.compute()
cl_tsw = M.raw_cl(3000)
M.struct_cleanup()
M.empty()
#
M.set(common_settings)
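# (the snippet stops here; the same pattern presumably repeats for the other
#  contributions, e.g. the early integrated Sachs-Wolfe term)
M.set({'temperature contributions': 'eisw'})
M.compute()
cl_eisw = M.raw_cl(3000)
M.struct_cleanup()
M.empty()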
Example #49
class_parameters = {
    'output': 'mTk,mPk',
    'H0': 67.66,
    'Omega_b': 0.04897,
    'N_ur': 3.046,
    'Omega_cdm': 0.2607,
    'YHe': 0.245,
    'z_reio': 7.82,
    'n_s': 0.9665,
    'A_s': 2.105e-9,
    'P_k_max_1/Mpc': 5000.0,
    'perturbed recombination': 'n',
    'non linear': 'halofit'
}

M = Class()
M.set(class_parameters)
M.set({'z_pk': z_compute})
M.compute()

h = M.h()  # get reduced Hubble for conversions to 1/Mpc

one_time = M.get_transfer(z_compute)

# Transfer functions

# Convert to units of Mpc^{-1}
k_ary = one_time['k (h/Mpc)'] * h

delta_b_ary = one_time['d_b']
# delta_chi_ary = one_time['d_chi']
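# (truncated in the original; a minimal hedged continuation)
delta_cdm_ary = one_time['d_cdm']   # CDM density transfer function
delta_tot_ary = one_time['d_tot']   # total matter transfer function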
Example #50
import matplotlib
import numpy as np
from classy import Class


# In[ ]:

font = {'size'   : 20, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'


# In[ ]:

#Lambda CDM
LCDM = Class()
LCDM.set({'Omega_cdm':0.25,'Omega_b':0.05})
LCDM.compute()


# In[ ]:

#Einstein-de Sitter
CDM = Class()
CDM.set({'Omega_cdm':0.95,'Omega_b':0.05})
CDM.compute()

# Just to cross-check that Omega_Lambda is negligible
# (but not exactly zero because we neglected radiation)
derived = CDM.get_current_derived_parameters(['Omega0_lambda'])
print(derived)
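# (continuation sketch: compare the expansion histories of the two models)
zz = np.linspace(0., 5., 100)
H_LCDM = [LCDM.Hubble(z) for z in zz]   # H(z) in 1/Mpc
H_EdS = [CDM.Hubble(z) for z in zz]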
Example #51
from classy import Class
import healpy as hp
import numpy as np

params = [('output', 'tCl pCl lCl'), ('l_max_scalars', 5000),
          ('lensing', 'yes'), ('n_s', 0.9665), ('omega_b', 0.02242),
          ('omega_cdm', 0.11933), ('100*theta_s', 1.04101),
          ('ln10^{10}A_s', 3.047), ('tau_reio', 0.0561)]

cosmo = Class()
cosmo.set(params)
cosmo.compute()
cls = cosmo.lensed_cl(5000)
cls_list = [it for _, it in cls.items()]
eb_tb = np.zeros(shape=cls["tt"].shape)
I, Q, U = hp.synfast(
    (cls['tt'], cls['ee'], cls['bb'], cls['te'], eb_tb, eb_tb),
    nside=32,
    new=True)
print(Q.shape)
cosmo.struct_cleanup()
cosmo.empty()
Example #52
                   'recfast_z_initial':z_max_pk,
                   #'k_step_sub':'0.01',
                   'k_per_decade_for_pk':k_per_decade,
                   'k_per_decade_for_bao':k_per_decade,
                   'k_min_tau0':k_min_tau0, # this value controls the minimum k value in the figure
                   'perturb_sampling_stepsize':'0.05',
                   'P_k_max_1/Mpc':P_k_max_inv_Mpc,
                   'compute damping scale':'yes', # needed to output and plot Silk damping scale
                   'gauge':'newtonian'}

###############
#
# call CLASS
#
###############
M = Class()
M.set(common_settings)
M.compute()
#
# define conformal time sampling array
#
times = M.get_current_derived_parameters(['tau_rec','conformal_age'])
tau_rec=times['tau_rec']
tau_0 = times['conformal_age']
tau1 = np.logspace(math.log10(tau_ini),math.log10(tau_rec),tau_num_early)
tau2 = np.logspace(math.log10(tau_rec),math.log10(tau_0),tau_num_late)[1:]
tau2[-1] *= 0.999 # this tiny shift avoids interpolation errors
tau = np.concatenate((tau1,tau2))
tau_num = len(tau)
#
# use table of background and thermodynamics quantities to define some functions
Example #53
    def do_model_setup(self, params):
        """ Method to calculate the power spectrum primordial and
        lensing templates for a given set of cosmological parameters.

        This computation requires that lmax be set much higher
        than the lmax required in the final analysis.

        Parameters
        ----------
        params: dict
            Dictionary of cosmological parameters to be sent to CLASS.

        Returns
        -------
        tuple(array_like(float))
            Tuple containing the BB primordial and lensing templates.
        """
        # drop 'a_lens' if present (it is not a CLASS input)
        params.pop('a_lens', None)
        params.update({
            'output': 'tCl pCl lCl',
            'l_max_scalars': 5000,
            'l_max_tensors': 2000,
            'modes': 's, t',
            'r': 1,
            'lensing': 'yes',
        })
        cosm = Class()
        cosm.set(params)
        cosm.compute()
        # get the lensed and raw power spectra up to the maximum
        # multipole used in the likelihood analysis. Multiply by
        # T_CMB ^ 2 to get from dimensionless to uK^2 units.
        lensed_cls = cosm.lensed_cl(3 * self.nside - 1)['bb'] * (2.7225e6)**2
        raw_cls = cosm.raw_cl(3 * self.nside - 1)['bb'] * (2.7225e6)**2
        # get ells, used in the calculation of the foreground model
        # over the same range.
        ells = cosm.raw_cl(3 * self.nside - 1)['ell']
        # do the house keeping for the CLASS code.
        cosm.struct_cleanup()
        cosm.empty()
        # calculate the lensing-only template
        lens_template = self.apply_coupling(lensed_cls - raw_cls)
        raw_cls = self.apply_coupling(raw_cls)
        # now separately do the foreground template setup.
        if self.marg:
            fg_template = np.zeros(3 * self.nside)
            fg_template[1:] = (ells[1:] / 80.)**-2.4
            fg_template = self.apply_coupling(fg_template)
            return (raw_cls, lens_template, fg_template)
        return (raw_cls, lens_template)
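
# A standalone sketch of the same computation outside the class above
# (hypothetical parameter values; the apply_coupling / nside machinery of the
# original class is omitted):
from classy import Class

params = {'omega_b': 0.02242, 'omega_cdm': 0.11933, 'h': 0.6766,
          'A_s': 2.105e-9, 'n_s': 0.9665, 'tau_reio': 0.0561,
          'output': 'tCl pCl lCl', 'l_max_scalars': 5000,
          'l_max_tensors': 2000, 'modes': 's, t', 'r': 1, 'lensing': 'yes'}
cosm = Class()
cosm.set(params)
cosm.compute()
lmax = 383  # e.g. 3 * nside - 1 for nside = 128
bb_lensed = cosm.lensed_cl(lmax)['bb'] * (2.7225e6)**2  # muK^2
bb_raw = cosm.raw_cl(lmax)['bb'] * (2.7225e6)**2        # muK^2
lens_template = bb_lensed - bb_raw   # lensing-only BB template
prim_template = bb_raw               # primordial BB template at r = 1
cosm.struct_cleanup()
cosm.empty()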
Example #54
lnAs_fid = 3.094
As_fid = np.exp(lnAs_fid)*1e-10
ns_fid = 0.9645
mnu_fid = 0.06
h0_fid = 0.6703234
om0_fid = (obh2_fid+och2_fid+mnu_fid/93.14)/h0_fid**2
ode0_fid = 1.0-om0_fid
b2_fid = 0.0
sigma_fid = 1.0
w_fid = -1.0

# set parameters for model calculation
zmax = 5.0
kmin = 1e-2
kmax = 0.5
cosmo_fid = Class()
pars_fid = {'100*theta_s':theta_fid,'omega_b':obh2_fid,'omega_cdm':och2_fid,
            'A_s':As_fid,'n_s':ns_fid,'m_ncdm':mnu_fid/3.,
            'P_k_max_h/Mpc':kmax,'z_max_pk':zmax,
            'output':'mPk','N_ur':0.00641,'N_ncdm':1,
            'T_ncdm':0.71611,'deg_ncdm':3.}
cosmo_fid.set(pars_fid)
cosmo_fid.compute()
kh_arr = np.logspace(np.log10(kmin),np.log10(kmax),1000)
nk = 19
del_k = 0.01
kh_out = np.array([0.02+i*del_k for i in range(0,nk)])
kh_out_02 = np.tile(kh_out,2)


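# A possible continuation (hedged sketch, not in the original snippet):
# evaluate the fiducial linear matter power spectrum at z = 0 on the output
# k grid. classy's pk() expects k in 1/Mpc, while kh_out is in h/Mpc, hence
# the factor of h.
h = cosmo_fid.h()
pk_fid = np.array([cosmo_fid.pk(k * h, 0.0) for k in kh_out])  # P(k) in Mpc^3
pk_fid_h = pk_fid * h**3                                       # P(k) in (Mpc/h)^3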
Example #55
# coding: utf-8

# In[ ]:

# import classy module
from classy import Class

# In[ ]:

# create instance of the class "Class"
LambdaCDM = Class()
# pass input parameters
LambdaCDM.set({
    'omega_b': 0.022032,
    'omega_cdm': 0.12038,
    'h': 0.67556,
    'A_s': 2.215e-9,
    'n_s': 0.9619,
    'tau_reio': 0.0925
})
LambdaCDM.set({
    'output': 'tCl,pCl,lCl,mPk',
    'lensing': 'yes',
    'P_k_max_1/Mpc': 3.0
})
# run class
LambdaCDM.compute()

# In[ ]:

# get all C_l output
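# A possible continuation (hedged sketch, not part of the original notebook
# cell): retrieve the lensed C_l's up to l = 2500 and convert the temperature
# spectrum to muK^2 (T_cmb() returns the CMB temperature in K).
cls = LambdaCDM.lensed_cl(2500)
ll = cls['ell'][2:]
clTT = cls['tt'][2:] * (LambdaCDM.T_cmb() * 1.e6)**2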
Example #56
def main(target=args['target'], base=args['base'], new=args['new']):
    # create instance of the class "Class"
    TargetCosmo = Class()

    # pass input parameters
    print("The default cosmology is read from " + target)
    target_param_dict = read_file(target)
    TargetCosmo.set(target_param_dict)

    # run class
    TargetCosmo.compute()
    os.remove(target_param_dict[rootName] + "parameters.ini")
    os.remove(target_param_dict[rootName] + "unused_parameters")
    theta_target = TargetCosmo.theta_s_100()
    print("Target 100*theta_s = ", theta_target)

    # The second (new) cosmology
    print("The new cosmology is read from " + base + " and " + new[1])
    base_param_dict = read_file(base)

    # Create a new table with the final cosmologies
    if new[0] == 'table':
        shutil.copy(new[1], cosmology_table)

    new_params, numCosm = read_input(new)
    # for each new cosmology
    for iCosm in range(numCosm):
        # Skip this cosmology unless its Hubble parameter equals the placeholder
        # value (HubbleDef) that marks it as "to be determined"
        if np.abs(new_params[HubbleParam][iCosm] -
                  HubbleDef) > 1.e-7 and new[0] == 'table':
            continue

        NewCosmo = Class()
        # Load the base parameter values
        NewCosmo.set(base_param_dict)
        # Create a dictionary
        new_param_dict = read_line(new_params,
                                   iCosm) if new[0] == 'table' else new_params
        if new_param_dict[rootName][-1:] != '.':
            new_param_dict[rootName] += '.'
        # create new directory with the root name unless it exists already
        dir_par = new_param_dict[rootName][:-1]
        if not os.path.isdir(dir_par):
            os.mkdir(dir_par)
        os.chdir(dir_par)
        NewCosmo.set(new_param_dict)

        # run class
        NewCosmo.compute()
        h = search(NewCosmo, theta_target)
        write_dict_to_ini(new_param_dict, h)
        os.chdir('..')

        # if running in table regime, modify the README table and delete everything else
        if new[0] == 'table':
            # modify the H0 and A_s columns in the final table
            A_s = new_param_dict['A_s']
            modify_table(cosmology_table, new_param_dict[rootName][:-1], h,
                         A_s)
            # remove the run directory and its outputs, which are no longer needed
            shutil.rmtree(dir_par)
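
# The helper `search` called above is not defined in this snippet. A hedged,
# hypothetical sketch of what it might do: bisect on h until the new cosmology
# reproduces the target value of 100*theta_s.
def search(cosmo, theta_target, h_min=0.4, h_max=1.0, tol=1.e-6):
    """Hypothetical bisection on h so that cosmo matches theta_target."""
    while h_max - h_min > tol:
        h_mid = 0.5 * (h_min + h_max)
        cosmo.struct_cleanup()
        cosmo.set({'h': h_mid})
        cosmo.compute()
        # theta_s grows with h at fixed physical densities
        if cosmo.theta_s_100() > theta_target:
            h_max = h_mid
        else:
            h_min = h_mid
    return 0.5 * (h_min + h_max)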
Example #57
import numpy as np
from classy import Class

params = {'output': 'tCl pCl lCl', 'lensing': 'yes'}
cosmo = Class()
cosmo.set(params)
cosmo.compute()
cls = cosmo.lensed_cl(768)

# rescale the dimensionless C_l's returned by CLASS (factor of 1e12)
for k in ['tt', 'te', 'ee', 'bb']:
    cls[k] *= 1e12

# save ell and the four spectra as space-separated columns
# (the format string sets the column separator)
np.savetxt('data/example_cls.txt',
           np.array([cls['ell'], cls['tt'], cls['te'], cls['ee'],
                     cls['bb']]).T,
           fmt='%i %e %e %e %e',
           header='ell TT TE EE BB')
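
# A quick check (hedged sketch, not part of the original script): read the
# file back and confirm the column layout.
ell, tt, te, ee, bb = np.loadtxt('data/example_cls.txt', unpack=True)
assert ell.shape == cls['ell'].shape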
Example #58
from numpy import arange, pi  # used below for the ell array and normalization
from cosmoslik import SlikPlugin  # assumed: SlikPlugin comes from the cosmoslik framework


class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    #{cosmoslik name : class name} - This needs to be done even for variables with the same name (because of for loop in self.model.set)!
    name_mapping = {#'As':'A_s',
                    #'ns':'n_s',
                    #'r':'r',
                    'custom1':'custom1',
                    'custom2':'custom2',
                    'custom3':'custom3',
                    #'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    'omk':'Omega_k',
                    'l_max_scalar':'l_max_scalars',
                    'l_max_tensor':'l_max_tensors',
                    'Tcmb':'T_cmb'
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()

    #def __call__(self,
    #             **kwargs):
    
    #    d={}
     #   for k, v in kwargs.iteritems():
      #      if k in self.name_mapping and v is not None:
       #         d[self.name_mapping[k]]=v
        #    else:
         #       d[k]=v
    
    #def __call__(self,
                 #ombh2,
                 #omch2,
                 #H0,
                 #As,
                 #ns,
                 #custom1,
                 #custom2,
                 #custom3,
                 #tau,
                 #w=None,
                 #r=None,
                 #nrun=None,
                 #omk=0,
                 #Yp=None,
                 #Tcmb=2.7255,
                 #massless_neutrinos=3.046,
                 #l_max_scalar=3000,
                 #l_max_tensor=3000,
                 #pivot_scalar=0.05,
                 #outputs=[],
                 #**kwargs):

        #print kwargs
        
    def __call__(self,**kwargs):
        #print kwargs
        #print kwargs['classparamlist']
        #print kwargs['d']
        
        d={}
        for k, v in kwargs.items():
            if k in kwargs['classparamlist']:
                if k in self.name_mapping and v is not None:
                    d[self.name_mapping[k]]=v
                else:
                    d[k]=v
            
        
        #d['P_k_ini type']='external_Pk'
        #d['modes'] = 's,t'
        self.model.set(**d)
                       
        l_max = d['l_max_scalars']
        Tcmb =  d['T_cmb']
        
        #print l_max

        #print d
        
        self.model.compute()

        ell = arange(l_max+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
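
# A hypothetical usage sketch (not part of the original plugin); the parameter
# names and values below are illustrative only:
if __name__ == '__main__':
    plugin = classy()
    result = plugin(
        classparamlist=['ombh2', 'omch2', 'H0', 'tau', 'output', 'lensing',
                        'l_max_scalars', 'T_cmb'],
        ombh2=0.02242, omch2=0.11933, H0=67.66, tau=0.0561,
        output='tCl pCl lCl', lensing='yes',
        l_max_scalars=2000, T_cmb=2.7255)
    print(result['cl_TT'][:5])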
Example #59
                   'h':0.67556,
                   'omega_b':0.022032,
                   'omega_cdm':0.12038,
                   'A_s':2.215e-9,
                   'n_s':0.9619,
                   'tau_reio':0.0925,
                   # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
                   'YHe':0.246,
                   # other options and settings
                   'compute damping scale':'yes', # needed to output the time of damping scale crossing
                   'gauge':'newtonian'}
##############
#
# call CLASS
#
M = Class()
M.set(common_settings)
M.compute()
#
# load perturbations
#
all_k = M.get_perturbations()  # this potentially contains scalars/tensors and all k values
print(all_k['scalar'][0].keys())
#
one_k = all_k['scalar'][0]     # this contains only the scalar perturbations for the requested k values
#
tau = one_k['tau [Mpc]']
Theta0 = 0.25*one_k['delta_g']
phi = one_k['phi']
psi = one_k['psi']
theta_b = one_k['theta_b']
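#
# A possible continuation (hedged sketch, not part of the original snippet):
# plot the effective temperature Theta0 + psi and the metric perturbation phi
# against conformal time to see the acoustic oscillations of this mode.
import matplotlib.pyplot as plt

plt.semilogx(tau, Theta0 + psi, label=r'$\Theta_0 + \psi$')
plt.semilogx(tau, phi, label=r'$\phi$')
plt.xlabel(r'$\tau \,\, \mathrm{[Mpc]}$')
plt.legend()
plt.show()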