Example #1
def test_class_setup():
    cosmology = astropy.cosmology.Planck13
    assert cosmology.Om0 == cosmology.Odm0 + cosmology.Ob0
    assert 1 == (cosmology.Om0 + cosmology.Ode0 + cosmology.Ok0 +
                 cosmology.Ogamma0 + cosmology.Onu0)
    class_parameters = get_class_parameters(cosmology)
    try:
        from classy import Class
        cosmo = Class()
        cosmo.set(class_parameters)
        cosmo.compute()
        assert cosmo.h() == cosmology.h
        assert cosmo.T_cmb() == cosmology.Tcmb0.value
        assert cosmo.Omega_b() == cosmology.Ob0
        # Calculate Omega(CDM)_0 two ways:
        assert abs((cosmo.Omega_m() - cosmo.Omega_b()) -
                   (cosmology.Odm0 - cosmology.Onu0)) < 1e-8
        assert abs(cosmo.Omega_m() - (cosmology.Om0 - cosmology.Onu0)) < 1e-8
        # CLASS calculates Omega_Lambda itself so this is a non-trivial test.
        calculated_Ode0 = cosmo.get_current_derived_parameters(
            ['Omega_Lambda'])['Omega_Lambda']
        assert abs(calculated_Ode0 - (cosmology.Ode0 + cosmology.Onu0)) < 1e-5
        cosmo.struct_cleanup()
        cosmo.empty()
    except ImportError:
        pass
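The test above imports get_class_parameters from its surrounding module, which is not shown on this page. A minimal, hypothetical sketch of such a mapping is given below; it deliberately ignores the massive-neutrino bookkeeping that the Onu0-dependent assertions rely on, so it illustrates the idea rather than reproducing the actual helper.

def get_class_parameters(cosmology):
    # Hypothetical sketch (not the helper used above): map an astropy cosmology
    # onto basic CLASS input parameters, ignoring massive neutrinos.
    return {
        'h': cosmology.h,                # reduced Hubble parameter
        'T_cmb': cosmology.Tcmb0.value,  # CMB temperature today [K]
        'Omega_b': cosmology.Ob0,        # baryon density fraction
        'Omega_cdm': cosmology.Odm0,     # cold dark matter density fraction
        'Omega_k': cosmology.Ok0,        # spatial curvature
        'N_ur': cosmology.Neff,          # effective number of relativistic species
    }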
Example #3
import itertools

import numpy as np
from classy import Class


def m_Pk(k=np.logspace(-3, 0., 100), z=0.53, nl_model='trg'):
    print(k)
    cosmo = Class()

    CLASS_INPUT = {}

    CLASS_INPUT['Mnu'] = ([{'N_eff': 0.0, 'N_ncdm': 1, 'm_ncdm': 0.06, 'deg_ncdm': 3.0}], 'normal')
    CLASS_INPUT['Output_spectra'] = ([{'output': 'mPk', 'P_k_max_1/Mpc': 1, 'z_pk': z}], 'power')

    CLASS_INPUT['Nonlinear'] = ([{'non linear': nl_model}], 'power')
            
    verbose = {}
    #    'input_verbose': 1,
    #    'background_verbose': 1,
    #    'thermodynamics_verbose': 1,
    #    'perturbations_verbose': 1,
    #    'transfer_verbose': 1,
    #    'primordial_verbose': 1,
    #    'spectra_verbose': 1,
    #    'nonlinear_verbose': 1,
    #    'lensing_verbose': 1,
    #    'output_verbose': 1
    #    }

    cosmo.struct_cleanup()
    cosmo.empty()


    INPUTPOWER = []
    INPUTNORMAL = [{}]
    for key, value in CLASS_INPUT.items():
        models, state = value
        if state == 'power':
            INPUTPOWER.append([{}] + models)
        else:
            INPUTNORMAL.extend(models)

    # Build every combination of the 'power' blocks, merged with the 'normal'
    # ones (done once, after the loop, from the complete lists).
    PRODPOWER = list(itertools.product(*INPUTPOWER))

    DICTARRAY = []
    for normelem in INPUTNORMAL:
        for powelem in PRODPOWER:
            temp_dict = normelem.copy()
            for elem in powelem:
                temp_dict.update(elem)
            DICTARRAY.append(temp_dict)

    scenario = {}
    for dic in DICTARRAY:
        scenario.update(dic)
    setting = cosmo.set(dict(list(verbose.items()) + list(scenario.items())))
    cosmo.compute()
    pk_out = []
    for k_i in k:
        pk_out.append(cosmo.pk(k_i, z))
    return pk_out
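An illustrative call of the function above (assuming the imports added at the top of the snippet; 'halofit' is the more widely used non-linear method, while 'trg' is an older CLASS option):

k_grid = np.logspace(-3, 0., 50)
pk_values = m_Pk(k=k_grid, z=0.53, nl_model='halofit')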
Example #4
    def do_model_setup(self, params):
        """ Method to calculate the power spectrum primordial and
        lensing templates for a given set of cosmological parameters.

        This computation requires that the lmax be set much higher
        than the lmax required in the final analysis.

        Parameters
        ----------
        params: dict
            Dictionary of cosmological parameters to be sent to CLASS.

        Returns
        -------
        tuple(array_like(float))
            Tuple containing the BB primordial and lensing templates.
        """
        try:
            params.pop('a_lens')
        except KeyError:
            pass
        params.update({
            'output': 'tCl pCl lCl',
            'l_max_scalars': 5000,
            'l_max_tensors': 2000,
            'modes': 's, t',
            'r': 1,
            'lensing': 'yes',
        })
        cosm = Class()
        cosm.set(params)
        cosm.compute()
        # get the lensed and raw power spectra up to the maximum
        # multipole used in the likelihood analysis. Multiply by
        # T_CMB ^ 2 to get from dimensionless to uK^2 units.
        lensed_cls = cosm.lensed_cl(3 * self.nside - 1)['bb'] * (2.7225e6)**2
        raw_cls = cosm.raw_cl(3 * self.nside - 1)['bb'] * (2.7225e6)**2
        # get ells, used in the calculation of the foreground model
        # over the same range.
        ells = cosm.raw_cl(3 * self.nside - 1)['ell']
        # do the house keeping for the CLASS code.
        cosm.struct_cleanup()
        cosm.empty()
        # calculate the lensing-only template
        lens_template = self.apply_coupling(lensed_cls - raw_cls)
        raw_cls = self.apply_coupling(raw_cls)
        # now separately do the foreground template setup.
        if self.marg:
            fg_template = np.zeros(3 * self.nside)
            fg_template[1:] = (ells[1:] / 80.)**-2.4
            fg_template = self.apply_coupling(fg_template)
            return (raw_cls, lens_template, fg_template)
        return (raw_cls, lens_template)
Example #5
def class_spectrum(params):
    """ Function to generate the theoretical CMB power spectrum for a
    given set of parameters.

    Parameters
    ----------
    params: dict
        dict containing the parameters expected by CLASS and their values.

    Returns
    -------
    array_like(float)
        Array of shape (4, lmax + 1) containing the TT, EE, BB, TE power
        spectra.

    """
    # This line is crucial to prevent pop from removing the lensing efficiency
    # from future runs in the same script.
    class_pars = {**params}
    try:
        # if lensing amplitude is set, remove it from dictionary that will
        # be passed to CLASS.
        a_lens = class_pars.pop('a_lens')
    except KeyError:
        # if not set in params dictionary just set to 1.
        a_lens = 1
    # generate the CMB realizations.
    print("Running CLASS with lensing efficiency: ", a_lens)
    cos = Class()
    cos.set(class_pars)
    cos.compute()
    # returns CLASS format, 0 to lmax, dimensionless, Cls.
    # multiply by (2.7225e6) ** 2 to get in uK_CMB ^ 2
    lensed_cls = cos.lensed_cl()
    raw_bb = cos.raw_cl()['bb'][:class_pars['l_max_scalars'] + 1]
    # calculate the lensing contribution to BB and rescale by
    # the lensing amplitude factor.
    lensing_bb = a_lens * (lensed_cls['bb'] - raw_bb)
    cos.struct_cleanup()
    cos.empty()
    synfast_cls = np.zeros((4, class_pars['l_max_scalars'] + 1))
    synfast_cls[0, :] = lensed_cls['tt']
    synfast_cls[1, :] = lensed_cls['ee']
    synfast_cls[2, :] = lensing_bb + raw_bb
    synfast_cls[3, :] = lensed_cls['te']
    return synfast_cls * (2.7225e6)**2
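An illustrative call of class_spectrum; the cosmological values below are common Planck-like numbers chosen only as an example, and 'a_lens' and 'l_max_scalars' are the keys the function actually reads:

example_params = {
    'output': 'tCl pCl lCl',
    'lensing': 'yes',
    'l_max_scalars': 2000,
    'h': 0.6736,
    'omega_b': 0.02237,
    'omega_cdm': 0.1200,
    'A_s': 2.1e-9,
    'n_s': 0.9649,
    'tau_reio': 0.0544,
    'a_lens': 1.0,
}
cls = class_spectrum(example_params)  # shape (4, l_max_scalars + 1), in uK_CMB^2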
Example #6
def calculate_power(cosmology,
                    k_min,
                    k_max,
                    z=0,
                    num_k=500,
                    scaled_by_h=True,
                    n_s=0.9619,
                    logA=3.0980):
    """
    Calculate the power spectrum P(k,z) over the range k_min <= k <= k_max.
    """
    try:
        from classy import Class
        cosmo = Class()
    except ImportError:
        raise RuntimeError('power.calculate_power requires classy.')

    class_parameters = get_class_parameters(cosmology)
    class_parameters['output'] = 'mPk'
    if scaled_by_h:
        class_parameters['P_k_max_h/Mpc'] = k_max
    else:
        class_parameters['P_k_max_1/Mpc'] = k_max
    class_parameters['n_s'] = n_s
    class_parameters['ln10^{10}A_s'] = logA
    cosmo.set(class_parameters)
    cosmo.compute()

    if scaled_by_h:
        k_scale = cosmo.h()
        Pk_scale = cosmo.h()**3
    else:
        k_scale = 1.
        Pk_scale = 1.

    result = np.empty((num_k, ), dtype=[('k', float), ('Pk', float)])
    result['k'][:] = np.logspace(np.log10(k_min), np.log10(k_max), num_k)
    for i, k in enumerate(result['k']):
        result['Pk'][i] = cosmo.pk(k * k_scale, z) * Pk_scale

    cosmo.struct_cleanup()
    cosmo.empty()

    return result
Example #7
    def get_theoretical_TT_TE_EE_unbinned_power_spec_D_ell(self, class_dict):
        ellmin = self.lmin_class
        ellmax = self.plmax
        cosmo = Class()
        cosmo.set(class_dict)
        cosmo.compute()
        cls = cosmo.lensed_cl(3000)
        cosmo.struct_cleanup()
        cosmo.empty()

        #get in units of microkelvin squared
        T_fac=(self.T_cmb*1e6)**2

        ell=cls['ell']
        D_fac=ell*(ell+1.)/(2*np.pi)

        Dltt=(T_fac*D_fac*cls['tt'])[ellmin:ellmax+1]
        Dlte=(T_fac*D_fac*cls['te'])[ellmin:ellmax+1]
        Dlee=(T_fac*D_fac*cls['ee'])[ellmin:ellmax+1]
        return cls['ell'][ellmin:ellmax+1], Dltt, Dlte, Dlee
Example #8
def get_bao_rs_dV(zs,params=None,engine='camb',de='ppf'):
    #FIXME: camb and class only agree at 3% level!!!
    import camb
    params = map_params(params,engine=engine)
    if engine=='camb':
        pars = set_camb_pars(params=params,de=de)
        results = camb.get_results(pars)
        retval = results.get_BAO(zs,pars)[:,0]
    elif engine=='class':
        from classy import Class
        zs = np.asarray(zs)
        cosmo = Class()
        params['output'] = ''
        cosmo.set(params)
        cosmo.compute()
        Hzs = np.array([cosmo.Hubble(z) for z in zs])
        D_As = np.array([cosmo.angular_distance(z) for z in zs])
        D_Vs = ((1+zs)**2 * D_As**2 * zs/Hzs)**(1/3.)
        retval = cosmo.rs_drag()/D_Vs
        cosmo.struct_cleanup()
        cosmo.empty()
    return retval
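For reference, a standalone sketch of the same CLASS branch, using only the classy calls that appear in the function above (the cosmological parameters are illustrative Planck-like values, not taken from map_params):

import numpy as np
from classy import Class

cosmo = Class()
cosmo.set({'output': '', 'h': 0.6736, 'omega_b': 0.02237, 'omega_cdm': 0.1200})
cosmo.compute()
zs = np.array([0.38, 0.51, 0.61])                          # typical BAO redshifts
Hzs = np.array([cosmo.Hubble(z) for z in zs])              # H(z) in 1/Mpc
D_As = np.array([cosmo.angular_distance(z) for z in zs])   # angular diameter distance [Mpc]
D_Vs = ((1 + zs)**2 * D_As**2 * zs / Hzs)**(1 / 3.)        # volume-averaged distance D_V(z)
rs_dV = cosmo.rs_drag() / D_Vs                             # same quantity returned above
cosmo.struct_cleanup()
cosmo.empty()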
Example #10
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """
    
    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
        **kwargs):


        self.model.set(**kwargs)
        self.model.compute()
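        # NOTE: l_max_scalar, Tcmb, arange and pi below are assumed to be provided
        # by the enclosing module (e.g. module-level settings and a numpy import);
        # they are not defined in this snippet.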

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #11
    def compute_Sigma8(self):

        # compute the values of sigma_8
        # given that we have already assigned
        # the cosmologies
        # this part does not depend on the systematics
        cosmo = Class()
        cosmo.set(self.cosmoParams)

        if self.settings.include_neutrino:
            cosmo.set(self.other_settings)
            cosmo.set(self.neutrino_settings)

        cosmo.set(self.class_argumets)

        try:
            cosmo.compute()

        except CosmoComputationError as failure_message:

            print(failure_message)

            self.sigma_8 = np.nan

            cosmo.struct_cleanup()
            cosmo.empty()

            return self.sigma_8

        except CosmoSevereError as critical_message:

            print(critical_message)

            self.sigma_8 = np.nan

            cosmo.struct_cleanup()
            cosmo.empty()

            return self.sigma_8

        sigma_8 = cosmo.sigma8()

        cosmo.struct_cleanup()
        cosmo.empty()

        del cosmo

        return sigma_8
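For reference, a standalone sketch of the same sigma_8 query using classy alone (the cosmoParams and neutrino settings above come from the surrounding class, so illustrative Planck-like values are used instead; sigma8() requires the matter power spectrum output):

from classy import Class

cosmo = Class()
cosmo.set({'output': 'mPk', 'P_k_max_h/Mpc': 1.,
           'h': 0.6736, 'omega_b': 0.02237, 'omega_cdm': 0.1200,
           'A_s': 2.1e-9, 'n_s': 0.9649})
cosmo.compute()
sigma_8 = cosmo.sigma8()
cosmo.struct_cleanup()
cosmo.empty()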
Example #12
    def _run_class(self, **kwargs):
        """Method to run class and return the lensed and unlensed spectra as
        dictionaries.

        Returns
        -------
        cls_l : `dict`
            Dictionary containing the lensed spectra for a given run of CLASS.
        cls_u : `dict`
            Dictionary containing the unlensed spectra for the same run of
            CLASS.

        """
        cosmo = Class()

        # Set some parameters we want that are not in the default CLASS setting.
        class_pars = {
            'output': 'tCl pCl lCl',
            'modes': 's, t',
            'lensing': self.lensing,
            'r': self.r,
        }

        # Update CLASS run with any kwargs that were passed. This is useful in
        # Pyranha.compute_cosmology in order to compute the r=1 case.
        class_pars.update(kwargs)
        cosmo.set(class_pars)
        cosmo.compute()

        # Get the lensed and unlensed spectra as dictionaries.
        cls_l = cosmo.lensed_cl(2500)
        cls_u = cosmo.raw_cl(2500)

        # Do the memory cleanup.
        cosmo.struct_cleanup()
        cosmo.empty()
        return cls_l, cls_u
Example #13
def get_power(params, l_min, l_max):

    #CLASS gives results in natural units
    #convert to muK^2 to match data
    T_cmb = 2.7255e6  #temp in microkelvins
    #create an instance of CLASS wrapper w/correct params
    cosmo = Class()
    cosmo.set(params)
    #cosmo.set({'output':'tCl,pCl,lCl,mPk','lensing':'yes','P_k_max_1/Mpc':3.0})

    cosmo.compute()

    #raw (unlensed) cl until l=l_max
    output = cosmo.raw_cl(l_max)  #use cosmo.lensed_cl(l_max) for lensed spectra
    ls = output['ell'][l_min:]
    Cls = output['tt'][l_min:]

    Dls = ls * (ls + 1) * Cls * T_cmb**2 / (2 * np.pi)

    #clean ups
    cosmo.struct_cleanup()
    cosmo.empty()

    return ls, Cls, Dls
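An illustrative call of get_power (Planck-like values chosen only as an example; 'output' must include tCl so that raw_cl returns the temperature spectrum):

example_params = {'output': 'tCl', 'l_max_scalars': 2500,
                  'h': 0.6736, 'omega_b': 0.02237, 'omega_cdm': 0.1200,
                  'A_s': 2.1e-9, 'n_s': 0.9649, 'tau_reio': 0.0544}
ls, Cls, Dls = get_power(example_params, l_min=2, l_max=2000)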
Example #14
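The notebook cells in this example assume a Class instance named LambdaCDM that has already been configured and computed. A minimal setup that would make them work, with illustrative Planck-like parameter values, is:

from classy import Class
LambdaCDM = Class()
LambdaCDM.set({'omega_b': 0.02237, 'omega_cdm': 0.1200, 'h': 0.6736,
               'A_s': 2.1e-9, 'n_s': 0.9649, 'tau_reio': 0.0544,
               'output': 'mPk', 'P_k_max_1/Mpc': 3.0})
LambdaCDM.compute()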
# In[ ]:

# get P(k) at redshift z=0
import numpy as np
kk = np.logspace(-4, np.log10(3), 1000)  # k in h/Mpc
Pk = []  # P(k) in (Mpc/h)**3
h = LambdaCDM.h()  # get reduced Hubble for conversions to 1/Mpc
for k in kk:
    Pk.append(LambdaCDM.pk(k * h, 0.) * h**3)  # function .pk(k,z)

# In[ ]:

# plot P(k)
plt.figure(2)
plt.xscale('log')
plt.yscale('log')
plt.xlim(kk[0], kk[-1])
plt.xlabel(r'$k \,\,\,\, [h/\mathrm{Mpc}]$')
plt.ylabel(r'$P(k) \,\,\,\, [\mathrm{Mpc}/h]^3$')
plt.plot(kk, Pk, 'b-')

# In[ ]:

plt.savefig('warmup_pk.pdf')

# In[ ]:

# optional: reset parameters to default in case you want
# to set different parameters and rerun LambdaCDM.compute()
LambdaCDM.empty()
Example #15
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    #{cosmoslik name : class name} - This needs to be done even for variables with the same name (because of for loop in self.model.set)!
    name_mapping = {#'As':'A_s',
                    #'ns':'n_s',
                    #'r':'r',
                    'custom1':'custom1',
                    'custom2':'custom2',
                    'custom3':'custom3',
                    #'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    'omk':'Omega_k',
                    'l_max_scalar':'l_max_scalars',
                    'l_max_tensor':'l_max_tensors',
                    'Tcmb':'T_cmb'
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()

    #def __call__(self,
    #             **kwargs):
    
    #    d={}
     #   for k, v in kwargs.iteritems():
      #      if k in self.name_mapping and v is not None:
       #         d[self.name_mapping[k]]=v
        #    else:
         #       d[k]=v
    
    #def __call__(self,
                 #ombh2,
                 #omch2,
                 #H0,
                 #As,
                 #ns,
                 #custom1,
                 #custom2,
                 #custom3,
                 #tau,
                 #w=None,
                 #r=None,
                 #nrun=None,
                 #omk=0,
                 #Yp=None,
                 #Tcmb=2.7255,
                 #massless_neutrinos=3.046,
                 #l_max_scalar=3000,
                 #l_max_tensor=3000,
                 #pivot_scalar=0.05,
                 #outputs=[],
                 #**kwargs):

        #print kwargs
        
    def __call__(self,**kwargs):
        #print kwargs
        #print kwargs['classparamlist']
        #print kwargs['d']
        
        d={}
        for k,v in kwargs.items():
            if k in kwargs['classparamlist']:
                if k in self.name_mapping and v is not None:
                    d[self.name_mapping[k]]=v
                else:
                    d[k]=v
            
        
        #d['P_k_ini type']='external_Pk'
        #d['modes'] = 's,t'
        self.model.set(**d)
                       
        l_max = d['l_max_scalars']
        Tcmb =  d['T_cmb']
        
        #print l_max

        #print d
        
        self.model.compute()

        ell = arange(l_max+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #16
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetest test_class.py

    It will run many times Class, on different cosmological scenarios, and
    everytime testing for different output possibilities (none asked, only mPk,
    etc..)

    """
    @classmethod
    def setUpClass(self):
        self.faulty_figs_path = os.path.join(
            os.path.sep.join(
                os.path.realpath(__file__).split(os.path.sep)[:-1]),
            'faulty_figs')

        if os.path.isdir(self.faulty_figs_path):
            shutil.rmtree(self.faulty_figs_path)

        os.mkdir(self.faulty_figs_path)

    @classmethod
    def tearDownClass(self):
        pass

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()
        self.cosmo_newt = Class()

        self.verbose = {
            'input_verbose': 1,
            'background_verbose': 1,
            'thermodynamics_verbose': 1,
            'perturbations_verbose': 1,
            'transfer_verbose': 1,
            'primordial_verbose': 1,
            'spectra_verbose': 1,
            'nonlinear_verbose': 1,
            'lensing_verbose': 1,
            'output_verbose': 1
        }
        self.scenario = {}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        self.cosmo_newt.struct_cleanup()
        self.cosmo_newt.empty()
        del self.scenario

    def poormansname(self, somedict):
        string = "_".join(
            [k + '=' + str(v) for k, v in list(somedict.items())])
        string = string.replace('/', '%')
        string = string.replace(',', '')
        string = string.replace(' ', '')
        return string

    @parameterized.expand(TUPLE_ARRAY)
    def test_0wrapper_implementation(self, inputdict):
        """Create a few instances based on different cosmologies"""
        self.scenario.update(inputdict)

        self.name = self.poormansname(inputdict)

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test case %s |\n' % self.name)
        sys.stderr.write('---------------------------------\n')
        for key, value in list(self.scenario.items()):
            sys.stderr.write("%s = %s\n" % (key, value))
            sys.stdout.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set(
            dict(list(self.verbose.items()) + list(self.scenario.items())))
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_dict = {'tCl': ['tt'], 'lCl': ['pp'], 'pCl': ['ee', 'bb']}
        density_cl_list = ['nCl', 'sCl']

        # 'lensing' is always set to yes. Therefore, trying to compute 'tCl' or
        # 'pCl' will fail except if we also ask for 'lCl'. The flag
        # 'should_fail' stores this status.
        sys.stderr.write('Should')
        should_fail = self.test_incompatible_input()
        if should_fail:
            sys.stderr.write(' fail...\n')
        else:
            sys.stderr.write(' not fail...\n')

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(self.cosmo.state,
                        "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print('--> Class is ready')
        # Depending
        if 'output' in list(self.scenario.keys()):
            # Positive tests of raw cls
            output = self.scenario['output']
            for elem in output.split():
                if elem in list(cl_dict.keys()):
                    for cl_type in cl_dict[elem]:
                        sys.stderr.write('--> testing raw_cl for %s\n' %
                                         cl_type)
                        cl = self.cosmo.raw_cl(100)
                        self.assertIsNotNone(cl, "raw_cl returned nothing")
                        self.assertEqual(
                            np.shape(cl[cl_type])[0], 101,
                            "raw_cl returned wrong size")
                    # TODO do the same for lensed if 'lCl' is there, and for
                    # density cl
                if elem == 'mPk':
                    sys.stderr.write('--> testing pk function\n')
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any(
                [elem in list(cl_dict.keys()) for elem in output.split()]):
                sys.stderr.write('--> testing absence of any Cl\n')
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if 'mPk' not in output.split():
                sys.stderr.write('--> testing absence of mPk\n')
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

        if COMPARE_OUTPUT:
            # Now, compute with Newtonian gauge, and compare the results
            self.cosmo_newt.set(
                dict(list(self.verbose.items()) + list(self.scenario.items())))
            self.cosmo_newt.set({'gauge': 'newtonian'})
            self.cosmo_newt.compute()
            # Check that the computation worked
            self.assertTrue(
                self.cosmo_newt.state,
                "Class failed to go through all __init__ methods in Newtonian gauge"
            )

            self.compare_output(self.cosmo, self.cosmo_newt)

    def test_incompatible_input(self):

        should_fail = False

        # If we have tensor modes, we must have one tensor observable,
        # either tCl or pCl.
        if has_tensor(self.scenario):
            if 'output' not in list(self.scenario.keys()):
                should_fail = True
            else:
                output = self.scenario['output'].split()
                if 'tCl' not in output and 'pCl' not in output:
                    should_fail = True

        # If we have specified lensing, we must have lCl in output,
        # otherwise lensing will not be read (which is an error).
        if 'lensing' in list(self.scenario.keys()):
            if 'output' not in list(self.scenario.keys()):
                should_fail = True
            else:
                output = self.scenario['output'].split()
                if 'lCl' not in output:
                    should_fail = True
                elif 'tCl' not in output and 'pCl' not in output:
                    should_fail = True

        # If we have specified a tensor method, we must have tensors.
        if 'tensor method' in list(self.scenario.keys()):
            if not has_tensor(self.scenario):
                should_fail = True

        # If we have specified non linear, we must have some form of
        # perturbations output.
        if 'non linear' in list(self.scenario.keys()):
            if 'output' not in list(self.scenario.keys()):
                should_fail = True

        # If we ask for Cl's of lensing potential, we must have scalar modes.
        if 'output' in list(self.scenario.keys()
                            ) and 'lCl' in self.scenario['output'].split():
            if 'modes' in list(self.scenario.keys()
                               ) and self.scenario['modes'].find('s') == -1:
                should_fail = True

        # If we specify initial conditions (for scalar modes), we must have
        # perturbations and scalar modes.
        if 'ic' in list(self.scenario.keys()):
            if 'modes' in list(self.scenario.keys()
                               ) and self.scenario['modes'].find('s') == -1:
                should_fail = True
            if 'output' not in list(self.scenario.keys()):
                should_fail = True

        # If we use inflation module, we must have scalar modes,
        # tensor modes, no vector modes and we should only have adiabatic IC:
        if 'P_k_ini type' in list(self.scenario.keys(
        )) and self.scenario['P_k_ini type'].find('inflation') != -1:
            if 'modes' not in list(self.scenario.keys()):
                should_fail = True
            else:
                if self.scenario['modes'].find('s') == -1:
                    should_fail = True
                if self.scenario['modes'].find('v') != -1:
                    should_fail = True
                if self.scenario['modes'].find('t') == -1:
                    should_fail = True
            if 'ic' in list(self.scenario.keys()
                            ) and self.scenario['ic'].find('i') != -1:
                should_fail = True

        return should_fail

    def compare_output(self, reference, candidate):
        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Comparing synch and Newt: |\n')
        sys.stderr.write('---------------------------------\n')

        for elem in ['raw_cl', 'lensed_cl', 'density_cl']:
            # Try to get the elem, but if they were not computed, a
            # CosmoComputeError should be raised. In this case, ignore the
            # whole block.
            try:
                to_test = getattr(candidate, elem)()
            except CosmoSevereError:
                continue
            ref = getattr(reference, elem)()
            for key, value in list(ref.items()):
                if key != 'ell':
                    sys.stderr.write('--> testing equality of %s %s\n' %
                                     (elem, key))
                    # For all self spectra, try to compare allclose
                    if key[0] == key[1]:
                        # If it is a 'dd' or 'll', it is a dictionary.
                        if isinstance(value, dict):
                            for subkey in list(value.keys()):
                                try:
                                    np.testing.assert_allclose(
                                        value[subkey],
                                        to_test[key][subkey],
                                        rtol=1e-03,
                                        atol=1e-20)
                                except AssertionError:
                                    self.cl_faulty_plot(
                                        elem + "_" + key, value[subkey][2:],
                                        to_test[key][subkey][2:])
                                except TypeError:
                                    self.cl_faulty_plot(
                                        elem + "_" + key, value[subkey][2:],
                                        to_test[key][subkey][2:])
                        else:
                            try:
                                np.testing.assert_allclose(value,
                                                           to_test[key],
                                                           rtol=1e-03,
                                                           atol=1e-20)
                            except AssertionError:
                                self.cl_faulty_plot(elem + "_" + key,
                                                    value[2:],
                                                    to_test[key][2:])
                            except TypeError:
                                self.cl_faulty_plot(elem + "_" + key,
                                                    value[2:],
                                                    to_test[key][2:])
                    # For cross-spectra, as there can be zero-crossing, we
                    # instead compare the difference.
                    else:
                        # First, we multiply each array by the biggest value
                        norm = max(
                            np.abs(value).max(),
                            np.abs(to_test[key]).max())
                        value *= norm
                        to_test[key] *= norm
                        try:
                            np.testing.assert_array_almost_equal(value,
                                                                 to_test[key],
                                                                 decimal=3)
                        except AssertionError:
                            self.cl_faulty_plot(elem + "_" + key, value[2:],
                                                to_test[key][2:])

        if 'output' in list(self.scenario.keys()):
            if self.scenario['output'].find('mPk') != -1:
                sys.stderr.write('--> testing equality of Pk')
                k = np.logspace(-2, log10(self.scenario['P_k_max_1/Mpc']))
                reference_pk = np.array([reference.pk(elem, 0) for elem in k])
                candidate_pk = np.array([candidate.pk(elem, 0) for elem in k])
                try:
                    np.testing.assert_allclose(reference_pk,
                                               candidate_pk,
                                               rtol=5e-03,
                                               atol=1e-20)
                except AssertionError:
                    self.pk_faulty_plot(k, reference_pk, candidate_pk)

    def cl_faulty_plot(self, cl_type, reference, candidate):
        path = os.path.join(self.faulty_figs_path, self.name)

        fig = plt.figure()
        ax_lin = plt.subplot(211)
        ax_log = plt.subplot(212)
        ell = np.arange(max(np.shape(candidate))) + 2
        ax_lin.plot(ell, 1 - candidate / reference)
        ax_log.loglog(ell, abs(1 - candidate / reference))

        ax_lin.set_xlabel('l')
        ax_log.set_xlabel('l')
        ax_lin.set_ylabel('1-candidate/reference')
        ax_log.set_ylabel('abs(1-candidate/reference)')

        ax_lin.set_title(self.name)
        ax_log.set_title(self.name)

        ax_lin.legend([cl_type])
        ax_log.legend([cl_type])

        fig.savefig(path + '_' + cl_type + '.pdf')

        # Store parameters (contained in self.scenario) to text file
        parameters = dict(
            list(self.verbose.items()) + list(self.scenario.items()))
        with open(path + '.ini', 'w') as param_file:
            for key, value in list(parameters.items()):
                param_file.write(key + " = " + str(value) + '\n')

    def pk_faulty_plot(self, k, reference, candidate):
        path = os.path.join(self.faulty_figs_path, self.name)

        fig = plt.figure()
        ax_lin = plt.subplot(211)
        ax_log = plt.subplot(212)
        ax_lin.plot(k, 1 - candidate / reference)
        ax_log.loglog(k, abs(1 - candidate / reference))

        ax_lin.set_xlabel('k')
        ax_log.set_xlabel('k')
        ax_lin.set_ylabel('1-candidate/reference')
        ax_log.set_ylabel('abs(1-candidate/reference)')

        ax_lin.set_title(self.name)
        ax_log.set_title(self.name)

        ax_lin.legend('$P_k$')
        ax_log.legend('$P_k$')

        fig.savefig(path + '_' + 'pk' + '.pdf')

        # Store parameters (contained in self.scenario) to text file
        parameters = dict(
            list(self.verbose.items()) + list(self.scenario.items()))
        with open(path + '.ini', 'w') as param_file:
            for key, value in list(parameters.items()):
                param_file.write(key + " = " + str(value) + '\n')
Example #17
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetest test_class.py

    It will run many times Class, on different cosmological scenarios, and
    everytime testing for different output possibilities (none asked, only mPk,
    etc..)

    """

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()

        self.verbose = {
            "input_verbose": 1,
            "background_verbose": 1,
            "thermodynamics_verbose": 1,
            "perturbations_verbose": 1,
            "transfer_verbose": 1,
            "primordial_verbose": 1,
            "spectra_verbose": 1,
            "nonlinear_verbose": 1,
            "lensing_verbose": 1,
            "output_verbose": 1,
        }
        self.scenario = {"lensing": "yes"}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        del self.scenario

    @parameterized.expand(
        itertools.product(
            ("LCDM", "Mnu", "Positive_Omega_k", "Negative_Omega_k", "Isocurvature_modes"),
            (
                {"output": ""},
                {"output": "mPk"},
                {"output": "tCl"},
                {"output": "tCl pCl lCl"},
                {"output": "mPk tCl lCl", "P_k_max_h/Mpc": 10},
                {"output": "nCl sCl"},
                {"output": "tCl pCl lCl nCl sCl"},
            ),
            ({"gauge": "newtonian"}, {"gauge": "sync"}),
            ({}, {"non linear": "halofit"}),
        )
    )
    def test_wrapper_implementation(self, name, scenario, gauge, nonlinear):
        """Create a few instances based on different cosmologies"""
        if name == "Mnu":
            self.scenario.update({"N_ncdm": 1, "m_ncdm": 0.06})
        elif name == "Positive_Omega_k":
            self.scenario.update({"Omega_k": 0.01})
        elif name == "Negative_Omega_k":
            self.scenario.update({"Omega_k": -0.01})
        elif name == "Isocurvature_modes":
            self.scenario.update({"ic": "ad,nid,cdi", "c_ad_cdi": -0.5})

        self.scenario.update(scenario)
        if scenario != {}:
            self.scenario.update(gauge)
        self.scenario.update(nonlinear)

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test case %s |\n" % name)
        sys.stderr.write("---------------------------------\n")
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set(dict(list(self.verbose.items()) + list(self.scenario.items())))
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_list = ["tCl", "lCl", "pCl", "nCl", "sCl"]

        # Depending on the cases, the compute should fail or not
        should_fail = True
        output = self.scenario["output"].split()
        for elem in output:
            if elem in ["tCl", "pCl"]:
                for elem2 in output:
                    if elem2 == "lCl":
                        should_fail = False
                        break

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(self.cosmo.state, "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print "--> Class is ready"
        # Depending
        if "output" in self.scenario.keys():
            # Positive tests
            output = self.scenario["output"]
            for elem in output.split():
                if elem in cl_list:
                    print "--> testing raw_cl function"
                    cl = self.cosmo.raw_cl(100)
                    self.assertIsNotNone(cl, "raw_cl returned nothing")
                    self.assertEqual(np.shape(cl["tt"])[0], 101, "raw_cl returned wrong size")
                if elem == "mPk":
                    print "--> testing pk function"
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_list for elem in output.split()]):
                print "--> testing absence of any Cl"
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if "mPk" not in self.scenario["output"].split():
                print "--> testing absence of mPk"
                # args = (0.1, 0)
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

    @parameterized.expand(
        itertools.product(("massless", "massive", "both"), ("photons", "massless", "exact"), ("t", "s, t"))
    )
    def test_tensors(self, scenario, method, modes):
        """Test the new tensor mode implementation"""
        self.scenario = {}
        if scenario == "massless":
            self.scenario.update({"N_eff": 3.046, "N_ncdm": 0})
        elif scenario == "massiv":
            self.scenario.update({"N_eff": 0, "N_ncdm": 2, "m_ncdm": "0.03, 0.04", "deg_ncdm": "2, 1"})
        elif scenario == "both":
            self.scenario.update({"N_eff": 1.5, "N_ncdm": 2, "m_ncdm": "0.03, 0.04", "deg_ncdm": "1, 0.5"})

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test case: %s %s %s |\n" % (scenario, method, modes))
        sys.stderr.write("---------------------------------\n")
        self.scenario.update({"tensor method": method, "modes": modes, "output": "tCl, pCl"})
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")
        self.cosmo.set(dict(list(self.verbose.items()) + list(self.scenario.items())))
        self.cosmo.compute()

    @parameterized.expand(list(zip(powerset(["100*theta_s", "Omega_dcdmdr"]), powerset([1.04, 0.20]))))
    def test_shooting_method(self, variables, values):
        Omega_cdm = 0.25

        scenario = {"Omega_b": 0.05}

        for variable, value in zip(variables, values):
            scenario.update({variable: value})

        if "Omega_dcdmdr" in variables:
            scenario.update({"Gamma_dcdm": 100, "Omega_cdm": Omega_cdm - scenario["Omega_dcdmdr"]})
        else:
            scenario.update({"Omega_cdm": Omega_cdm})

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test shooting: %s |\n" % (", ".join(variables)))
        sys.stderr.write("---------------------------------\n")
        for key, value in scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        scenario.update(self.verbose)
        self.assertTrue(self.cosmo.set(scenario), "Class failed to initialise with this input")
        self.assertRaises
        self.cosmo.compute()

        # Now, check that the values are properly extracted
        for variable, value in zip(variables, values):
            if variable == "100*theta_s":
                computed_value = self.cosmo.get_current_derived_parameters([variable])[variable]
                self.assertAlmostEqual(value, computed_value, places=5)
Example #18
As_arr = np.linspace(2.1e-9, 2.4e-9, numAs)
dAs = 10e-10

numks = 3
ks_arr = np.linspace(10**(-4), 10**(0), numks)
dks = 10**(-5)

fish_sum = []
full_fish = np.zeros((numAs, numAs))

for i in np.arange(numAs):
    cosmop = Class()
    cosmop.set(params)
    cosmop.set({'A_s': As_arr[i] * (1 + dAs)})
    cosmop.compute()
    cosmop.empty()
    cosmom = Class()
    cosmom.set(params)
    cosmom.set({'A_s': As_arr[i] * (1 - dAs)})
    cosmom.compute()
    cosmom.empty()
    dTTi = (np.array(factor * cosmop.raw_cl(lmax)['tt'][2:]) -
            np.array(factor * cosmom.raw_cl(lmax)['tt'][2:])) / (2 * dAs)
    dEEi = (np.array(factor * cosmop.raw_cl(lmax)['ee'][2:]) -
            np.array(factor * cosmom.raw_cl(lmax)['ee'][2:])) / (2 * dAs)
    dTEi = (np.array(factor * cosmop.raw_cl(lmax)['te'][2:]) -
            np.array(factor * cosmom.raw_cl(lmax)['te'][2:])) / (2 * dAs)
    for j in range(i, numAs):
        cosmop = Class()
        cosmop.set(params)
        cosmop.set({'A_s': As_arr[j] * (1 + dAs)})
Example #19
class Sampler:
    def __init__(self, NSIDE):
        self.NSIDE = NSIDE
        self.Npix = 12 * NSIDE**2
        print("Initialising sampler")
        self.cosmo = Class()
        print("Maps")
        self.templates_map, self.templates_var = aggregate_pixels_params(
            get_pixels_params(self.NSIDE))
        print("betas")
        self.matrix_mean, self.matrix_var = aggregate_mixing_params(
            get_mixing_matrix_params(self.NSIDE))
        print("Cosmo params")
        self.cosmo_means = np.array(COSMO_PARAMS_MEANS)
        self.cosmo_var = (np.diag(COSMO_PARAMS_SIGMA) / 2)**2

        plt.hist(self.templates_map)
        plt.savefig("mean_values.png")
        plt.close()
        plt.hist(self.templates_var)
        plt.savefig("std_values.png")
        plt.close()
        self.instrument = pysm.Instrument(
            get_instrument('litebird', self.NSIDE))
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)
        print("End of initialisation")

    def __getstate__(self):
        state_dict = self.__dict__.copy()
        del state_dict["mixing_matrix_evaluator"]
        del state_dict["cosmo"]
        del state_dict["mixing_matrix"]
        del state_dict["components"]
        return state_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.cosmo = Class()
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)

    def sample_normal(self, mu, sigma, s=None):
        return np.random.multivariate_normal(mu, sigma, s)

    def sample_model_parameters(self):
        #sampled_cosmo = self.sample_normal(self.cosmo_means, self.cosmo_var)
        sampled_cosmo = np.array([
            0.9665, 0.02242, 0.11933, 1.04101, 3.047, 0.0561
        ]) - 2 * np.array(COSMO_PARAMS_SIGMA)
        #sampled_beta = self.sample_normal(self.matrix_mean, self.matrix_var).reshape((self.Npix, -1), order = "F")
        sampled_beta = self.matrix_mean.reshape((self.Npix, -1), order="F")
        return sampled_cosmo, sampled_beta

    def sample_CMB_QU(self, cosmo_params):
        params = {
            'output': OUTPUT_CLASS,
            'l_max_scalars': L_MAX_SCALARS,
            'lensing': LENSING
        }
        params.update(cosmo_params)
        self.cosmo.set(params)
        self.cosmo.compute()
        cls = self.cosmo.lensed_cl(L_MAX_SCALARS)
        eb_tb = np.zeros(shape=cls["tt"].shape)
        _, Q, U = hp.synfast(
            (cls['tt'], cls['ee'], cls['bb'], cls['te'], eb_tb, eb_tb),
            nside=self.NSIDE,
            new=True)
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        return Q, U

    def sample_mixing_matrix(self, betas):
        mat_pixels = []
        for i in range(self.Npix):
            m = self.mixing_matrix_evaluator(betas[i, :])
            mat_pixels.append(m)

        mixing_matrix = np.stack(mat_pixels, axis=0)
        return mixing_matrix

    def sample_model(self):
        cosmo_params, sampled_beta = self.sample_model_parameters()
        #maps = self.sample_normal(self.templates_map, self.templates_var)

        cosmo_dict = {
            l[0]: l[1]
            for l in zip(COSMO_PARAMS_NAMES, cosmo_params.tolist())
        }
        tuple_QU = self.sample_CMB_QU(cosmo_dict)
        map_CMB = np.stack(tuple_QU, axis=1)
        '''
        mixing_matrix = self.sample_mixing_matrix(sampled_beta)
        map_Sync = np.stack([maps[0:self.Npix], maps[self.Npix:2*self.Npix]], axis = 1)
        map_Dust = np.stack([maps[2*self.Npix:3*self.Npix], maps[3*self.Npix:]], axis = 1)
        entire_map = np.stack([map_CMB, map_Dust, map_Sync], axis = 1)

        dot_prod = []
        for j in range(self.Npix):
            m = np.dot(mixing_matrix[j, :, :], entire_map[j, :, :])
            dot_prod.append(m)

        sky_map = np.stack(dot_prod, axis = 0)
        '''
        sky_map = map_CMB

        return {
            "sky_map": sky_map,
            "cosmo_params": cosmo_params,
            "betas": sampled_beta
        }


#sampler = Sampler(NSIDE)
#r = sampler.sample_model(1)
#['beta_d' 'temp' 'beta_pl']
#['beta_d' 'temp']
#['beta_pl']
Example #20
kk = np.logspace(-4,np.log10(3),1000) # k in h/Mpc
Pk = [] # P(k) in (Mpc/h)**3
h = LambdaCDM.h() # get reduced Hubble for conversions to 1/Mpc
for k in kk:
    Pk.append(LambdaCDM.pk(k*h,0.)*h**3) # function .pk(k,z)


# In[ ]:

# plot P(k)
plt.figure(2)
plt.xscale('log');plt.yscale('log');plt.xlim(kk[0],kk[-1])
plt.xlabel(r'$k \,\,\,\, [h/\mathrm{Mpc}]$')
plt.ylabel(r'$P(k) \,\,\,\, [\mathrm{Mpc}/h]^3$')
plt.plot(kk,Pk,'b-')


# In[ ]:

plt.savefig('warmup_pk.pdf')


# In[ ]:

# optional: clear content of LambdaCDM (to reuse it for another model)
LambdaCDM.struct_cleanup()
# optional: reset parameters to default
LambdaCDM.empty()

# In[ ]:
Example #21
    def fitEE(self):

        # function to compute the band powers

        cosmo = Class()
        cosmo.set(self.cosmoParams)

        if self.settings.include_neutrino:
            cosmo.set(self.other_settings)
            cosmo.set(self.neutrino_settings)

        cosmo.set(self.class_argumets)

        try:
            cosmo.compute()

        except CosmoComputationError as failure_message:

            print(failure_message)

            self.sigma_8 = np.nan

            cosmo.struct_cleanup()
            cosmo.empty()

            return np.array([np.nan] * self.nzcorrs * self.bo_EE), np.array(
                [np.nan] * self.nzcorrs * self.bo_EE), np.array(
                    [np.nan] * self.nzcorrs * self.bo_EE)

        except CosmoSevereError as critical_message:

            print(critical_message)

            self.sigma_8 = np.nan

            cosmo.struct_cleanup()
            cosmo.empty()

            return np.array([np.nan] * self.nzcorrs * self.bo_EE), np.array(
                [np.nan] * self.nzcorrs * self.bo_EE), np.array(
                    [np.nan] * self.nzcorrs * self.bo_EE)

        # retrieve Omega_m and h from cosmo (CLASS)
        self.Omega_m = cosmo.Omega_m()
        self.small_h = cosmo.h()

        self.rho_crit = self.get_critical_density()

        # derive the linear growth factor D(z)
        linear_growth_rate = np.zeros_like(self.redshifts)
        # print self.redshifts
        for index_z, z in enumerate(self.redshifts):
            try:
                # for CLASS ver >= 2.6:
                linear_growth_rate[
                    index_z] = cosmo.scale_independent_growth_factor(z)
            except BaseException:
                # my own function from private CLASS modification:
                linear_growth_rate[index_z] = cosmo.growth_factor_at_z(z)
        # normalize to unity at z=0:
        try:
            # for CLASS ver >= 2.6:
            linear_growth_rate /= cosmo.scale_independent_growth_factor(0.)
        except BaseException:
            # my own function from private CLASS modification:
            linear_growth_rate /= cosmo.growth_factor_at_z(0.)

        # get distances from cosmo-module:
        r, dzdr = cosmo.z_of_r(self.redshifts)

        self.sigma_8 = cosmo.sigma8()

        # Get power spectrum P(k=l/r,z(r)) from cosmological module
        # this doesn't really have to go into the loop over fields!
        pk = np.zeros((self.settings.nellsmax, self.settings.nzmax), 'float64')
        k_max_in_inv_Mpc = self.settings.k_max_h_by_Mpc * self.small_h

        # note that this is being computed at only nellsmax values of ell,
        # followed by an interpolation
        record = np.zeros((self.settings.nellsmax, self.settings.nzmax))
        record_k = np.zeros((self.settings.nellsmax, self.settings.nzmax))
        for index_ells in range(self.settings.nellsmax):
            for index_z in range(1, self.settings.nzmax):
                k_in_inv_Mpc = (self.ells[index_ells] + 0.5) / r[index_z]
                z = self.redshifts[index_z]

                record[index_ells, index_z] = self.baryon_feedback_bias_sqr(
                    k_in_inv_Mpc / self.small_h,
                    self.redshifts[index_z],
                    A_bary=self.systematics['A_bary'])
                record_k[index_ells, index_z] = k_in_inv_Mpc

        self.record_bf = record[:, 1:].flatten()
        self.record_k = record_k[:, 1:].flatten()
        self.k_max_in_inv_Mpc = k_max_in_inv_Mpc

        for index_ells in range(self.settings.nellsmax):
            for index_z in range(1, self.settings.nzmax):
                # standard Limber approximation:
                # k = ells[index_ells] / r[index_z]
                # extended Limber approximation (cf. LoVerde & Afshordi 2008):
                k_in_inv_Mpc = (self.ells[index_ells] + 0.5) / r[index_z]
                if k_in_inv_Mpc > k_max_in_inv_Mpc:
                    pk_dm = 0.
                else:
                    pk_dm = cosmo.pk(k_in_inv_Mpc, self.redshifts[index_z])
                # pk[index_ells,index_z] = cosmo.pk(ells[index_ells]/r[index_z], self.redshifts[index_z])
                if self.settings.baryon_feedback:
                    pk[index_ells,
                       index_z] = pk_dm * self.baryon_feedback_bias_sqr(
                           k_in_inv_Mpc / self.small_h,
                           self.redshifts[index_z],
                           A_bary=self.systematics['A_bary'])
                else:
                    pk[index_ells, index_z] = pk_dm

        # for KiDS-450 constant biases in photo-z are not sufficient:
        if self.settings.bootstrap_photoz_errors:
            # draw a random bootstrap n(z); borders are inclusive!
            random_index_bootstrap = np.random.randint(
                int(self.settings.index_bootstrap_low),
                int(self.settings.index_bootstrap_high) + 1)
            # print 'Bootstrap index:', random_index_bootstrap
            pz = np.zeros((self.settings.nzmax, self.nzbins), 'float64')
            pz_norm = np.zeros(self.nzbins, 'float64')

            for zbin in range(self.nzbins):

                redshift_bin = self.redshift_bins[zbin]
                # ATTENTION: hard-coded subfolder!
                # index can be recycled since bootstraps for tomographic bins
                # are independent!
                fname = os.path.join(
                    self.settings.data_directory,
                    '{:}/bootstraps/{:}/n_z_avg_bootstrap{:}.hist'.format(
                        self.settings.photoz_method, redshift_bin,
                        random_index_bootstrap))
                z_hist, n_z_hist = np.loadtxt(fname, unpack=True)

                shift_to_midpoint = np.diff(z_hist)[0] / 2.
                spline_pz = itp.splrep(z_hist + shift_to_midpoint, n_z_hist)
                mask_min = self.redshifts >= z_hist.min() + shift_to_midpoint
                mask_max = self.redshifts <= z_hist.max() + shift_to_midpoint
                mask = mask_min & mask_max
                # points outside the z-range of the histograms are set to 0!
                pz[mask, zbin] = itp.splev(self.redshifts[mask], spline_pz)

                dz = self.redshifts[1:] - self.redshifts[:-1]
                pz_norm[zbin] = np.sum(0.5 * (pz[1:, zbin] + pz[:-1, zbin]) *
                                       dz)

            pr = pz * (dzdr[:, np.newaxis] / pz_norm)

        else:
            pr = self.pz * (dzdr[:, np.newaxis] / self.pz_norm)
            # pr[pr < 0] = 0

        g = np.zeros((self.settings.nzmax, self.nzbins), 'float64')

        for zbin in range(self.nzbins):
            # assumes that z[0] = 0
            for nr in range(1, self.settings.nzmax - 1):
                # for nr in range(self.nzmax - 1):
                fun = pr[nr:, zbin] * (r[nr:] - r[nr]) / r[nr:]
                g[nr, zbin] = np.sum(0.5 * (fun[1:] + fun[:-1]) *
                                     (r[nr + 1:] - r[nr:-1]))
                g[nr, zbin] *= 2. * r[nr] * (1. + self.redshifts[nr])

        # Start loop over l for computation of C_l^shear
        Cl_GG_integrand = np.zeros(
            (self.settings.nzmax, self.nzbins, self.nzbins), 'float64')
        Cl_GG = np.zeros((self.settings.nellsmax, self.nzbins, self.nzbins),
                         'float64')

        Cl_II_integrand = np.zeros_like(Cl_GG_integrand)
        Cl_II = np.zeros_like(Cl_GG)

        Cl_GI_integrand = np.zeros_like(Cl_GG_integrand)
        Cl_GI = np.zeros_like(Cl_GG)

        dr = r[1:] - r[:-1]
        for index_ell in range(self.settings.nellsmax):

            # find Cl_integrand = (g(r) / r)**2 * P(l/r,z(r))
            for zbin1 in range(self.nzbins):
                for zbin2 in range(zbin1 + 1):  # self.nzbins):
                    Cl_GG_integrand[1:, zbin1, zbin2] = g[1:, zbin1] * \
                        g[1:, zbin2] / r[1:]**2 * pk[index_ell, 1:]

                    factor_IA = self.get_factor_IA(
                        self.redshifts[1:], linear_growth_rate[1:],
                        self.systematics['A_IA'])  # / self.dzdr[1:]
                    # print F_of_x
                    # print self.eta_r[1:, zbin1].shape
                    Cl_II_integrand[1:, zbin1, zbin2] = pr[1:, zbin1] * \
                        pr[1:, zbin2] * factor_IA**2 / r[1:]**2 * pk[index_ell, 1:]
                    pref = g[1:, zbin1] * pr[1:, zbin2] + g[1:, zbin2] * pr[
                        1:, zbin1]
                    Cl_GI_integrand[1:, zbin1,
                                    zbin2] = pref * factor_IA / r[1:]**2 * pk[
                                        index_ell, 1:]

            # Integrate over r to get C_l^shear_ij = P_ij(l)
            # C_l^shear_ij = 9/4 Omega0_m^2 H_0^4 \int_0^rmax dr (g_i(r) g_j(r)
            # / r**2) P(k=l/r, z(r))
            for zbin1 in range(self.nzbins):
                for zbin2 in range(zbin1 + 1):  # self.nzbins):
                    Cl_GG[index_ell, zbin1, zbin2] = np.sum(
                        0.5 * (Cl_GG_integrand[1:, zbin1, zbin2] +
                               Cl_GG_integrand[:-1, zbin1, zbin2]) * dr)
                    # here we divide by 16, because we get a 2^2 from g(z)!
                    Cl_GG[index_ell, zbin1, zbin2] *= 9. / 16. * \
                        self.Omega_m**2  # in units of Mpc**4
                    # dimensionless
                    Cl_GG[index_ell, zbin1,
                          zbin2] *= (self.small_h / 2997.9)**4

                    Cl_II[index_ell, zbin1, zbin2] = np.sum(
                        0.5 * (Cl_II_integrand[1:, zbin1, zbin2] +
                               Cl_II_integrand[:-1, zbin1, zbin2]) * dr)

                    Cl_GI[index_ell, zbin1, zbin2] = np.sum(
                        0.5 * (Cl_GI_integrand[1:, zbin1, zbin2] +
                               Cl_GI_integrand[:-1, zbin1, zbin2]) * dr)
                    # here we divide by 4, because we get a 2 from g(r)!
                    Cl_GI[index_ell, zbin1, zbin2] *= 3. / 4. * self.Omega_m
                    Cl_GI[index_ell, zbin1,
                          zbin2] *= (self.small_h / 2997.9)**2

        # ordering of redshift bins is correct in definition of theory below!
        theory_EE_GG = np.zeros((self.nzcorrs, self.bo_EE), 'float64')
        theory_EE_II = np.zeros((self.nzcorrs, self.bo_EE), 'float64')
        theory_EE_GI = np.zeros((self.nzcorrs, self.bo_EE), 'float64')

        index_corr = 0
        # A_noise_corr = np.zeros(self.nzcorrs)
        for zbin1 in range(self.nzbins):
            for zbin2 in range(zbin1 + 1):  # self.nzbins):
                # correlation = 'z{:}z{:}'.format(zbin1 + 1, zbin2 + 1)
                # print(zbin1, zbin2)

                Cl_sample_GG = Cl_GG[:, zbin1, zbin2]
                spline_Cl_GG = itp.splrep(self.ells, Cl_sample_GG)
                D_l_EE_GG = self.ell_norm * \
                    itp.splev(self.ells_sum, spline_Cl_GG)

                theory_EE_GG[index_corr, :] = self.get_theory(
                    self.ells_sum,
                    D_l_EE_GG,
                    self.band_window_matrix,
                    index_corr,
                    band_type_is_EE=True)

                Cl_sample_GI = Cl_GI[:, zbin1, zbin2]
                spline_Cl_GI = itp.splrep(self.ells, Cl_sample_GI)
                D_l_EE_GI = self.ell_norm * \
                    itp.splev(self.ells_sum, spline_Cl_GI)
                theory_EE_GI[index_corr, :] = self.get_theory(
                    self.ells_sum,
                    D_l_EE_GI,
                    self.band_window_matrix,
                    index_corr,
                    band_type_is_EE=True)

                Cl_sample_II = Cl_II[:, zbin1, zbin2]
                spline_Cl_II = itp.splrep(self.ells, Cl_sample_II)
                D_l_EE_II = self.ell_norm * \
                    itp.splev(self.ells_sum, spline_Cl_II)
                theory_EE_II[index_corr, :] = self.get_theory(
                    self.ells_sum,
                    D_l_EE_II,
                    self.band_window_matrix,
                    index_corr,
                    band_type_is_EE=True)

                index_corr += 1

        cosmo.struct_cleanup()
        cosmo.empty()

        return theory_EE_GG.flatten(), theory_EE_GI.flatten(), theory_EE_II.flatten()
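Every C_l integral above uses the same open-coded trapezoidal rule, np.sum(0.5 * (f[1:] + f[:-1]) * dr). A minimal, self-contained sketch with a toy kernel (the grid and integrand below are illustrative, not the likelihood's actual inputs) shows that this pattern matches NumPy's built-in trapezoid rule:

import numpy as np

# Toy comoving-distance grid and integrand, standing in for g_i(r) g_j(r) / r**2 * P(l/r, z(r)).
r = np.linspace(1.0, 3000.0, 512)
f = np.exp(-((r - 1500.0) / 400.0) ** 2) / r**2

dr = r[1:] - r[:-1]
cl_manual = np.sum(0.5 * (f[1:] + f[:-1]) * dr)         # pattern used in the likelihood above
trapezoid = getattr(np, "trapezoid", None) or np.trapz  # name differs across NumPy versions
cl_builtin = trapezoid(f, r)
assert np.isclose(cl_manual, cl_builtin)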
Example #22
def m_Pk(k=np.logspace(-3, 0., 100), z=0.53, nl_model='trg'):
    print(k)
    cosmo = Class()

    CLASS_INPUT = {}

    CLASS_INPUT['Mnu'] = ([{
        'N_eff': 0.0,
        'N_ncdm': 1,
        'm_ncdm': 0.06,
        'deg_ncdm': 3.0
    }], 'normal')
    CLASS_INPUT['Output_spectra'] = ([{
        'output': 'mPk',
        'P_k_max_1/Mpc': 1,
        'z_pk': z
    }], 'power')

    CLASS_INPUT['Nonlinear'] = ([{'non linear': nl_model}], 'power')

    verbose = {}
    #    'input_verbose': 1,
    #    'background_verbose': 1,
    #    'thermodynamics_verbose': 1,
    #    'perturbations_verbose': 1,
    #    'transfer_verbose': 1,
    #    'primordial_verbose': 1,
    #    'spectra_verbose': 1,
    #    'nonlinear_verbose': 1,
    #    'lensing_verbose': 1,
    #    'output_verbose': 1
    #    }

    cosmo.struct_cleanup()
    cosmo.empty()

    INPUTPOWER = []
    INPUTNORMAL = [{}]
    for key, value in CLASS_INPUT.items():
        models, state = value
        if state == 'power':
            INPUTPOWER.append([{}] + models)
        else:
            INPUTNORMAL.extend(models)

    # Build every parameter combination once, after all input blocks are collected.
    PRODPOWER = list(itertools.product(*INPUTPOWER))

    DICTARRAY = []
    for normelem in INPUTNORMAL:
        for powelem in PRODPOWER:  # itertools.product(*modpower):
            temp_dict = normelem.copy()
            for elem in powelem:
                temp_dict.update(elem)
            DICTARRAY.append(temp_dict)

    scenario = {}
    for dic in DICTARRAY:
        scenario.update(dic)
    setting = cosmo.set(dict(verbose, **scenario))
    cosmo.compute()
    pk_out = []
    for k_i in k:
        pk_out.append(cosmo.pk(k_i, z))
    return pk_out
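m_Pk builds its CLASS input by merging the 'normal' blocks with every element of the Cartesian product of the 'power' blocks. The sketch below isolates that merging logic in Python 3, with 'halofit' used as an illustrative nonlinear model and no actual call to classy:

import itertools

CLASS_INPUT = {
    'Mnu': ([{'N_ncdm': 1, 'm_ncdm': 0.06, 'deg_ncdm': 3.0}], 'normal'),
    'Output_spectra': ([{'output': 'mPk', 'P_k_max_1/Mpc': 1, 'z_pk': 0.53}], 'power'),
    'Nonlinear': ([{'non linear': 'halofit'}], 'power'),
}

input_power, input_normal = [], [{}]
for models, state in CLASS_INPUT.values():
    if state == 'power':
        input_power.append([{}] + models)  # the empty dict means "this block switched off"
    else:
        input_normal.extend(models)

scenarios = []
for normal in input_normal:
    for power_combo in itertools.product(*input_power):
        merged = dict(normal)
        for block in power_combo:
            merged.update(block)
        scenarios.append(merged)

# Each entry of `scenarios` could then be passed to classy via cosmo.set(scenario).
print(len(scenarios), scenarios[-1])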
Example #23
class classy(SlikPlugin):
    """
    Plugin for CLASS.

    Credit: Brent Follin, Teresa Hamill
    """

    #{cosmoslik name : class name}
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot'}


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 tau,
                 omnuh2, #0.006
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 massive_neutrinos=1,
                 massless_neutrinos=2.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.002,
                 outputs=[],
                 **kwargs):


        
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       **{self.name_mapping[k]:v for k,v in locals().items() 
                          if k in self.name_mapping and v is not None})
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
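The plugin's name_mapping is what turns sampler-side parameter names into CLASS names before the call to self.model.set. A standalone sketch of just that translation step (pure Python; the parameter values are placeholders, not a recommended cosmology):

name_mapping = {'As': 'A_s', 'ns': 'n_s', 'ombh2': 'omega_b',
                'omch2': 'omega_cdm', 'tau': 'tau_reio', 'H0': 'H0'}

def to_class_params(sampler_params, mapping=name_mapping):
    """Keep only mapped, non-None parameters and rename them for CLASS."""
    return {mapping[k]: v for k, v in sampler_params.items()
            if k in mapping and v is not None}

print(to_class_params({'As': 2.1e-9, 'ns': 0.965, 'H0': 67.4, 'w': None}))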
Example #24
	    'custom3': 0,
	    'custom4': 0,
	    'custom5': 0}

#Get the unperturbed cls for comparison
cosmo = Class()
cosmo.set(params)
cosmo.compute()
clso=cosmo.lensed_cl(2508)['tt'][30:]
ell = cosmo.lensed_cl(2508)['ell'][30:]

for i in range(len(clso)):
	clso[i]=ell[i]*(ell[i]+1)/(4*np.pi)*((2.726e6)**2)*clso[i]
a=np.zeros(5)
cosmo.struct_cleanup()
cosmo.empty()
dcls=np.zeros([clso.shape[0],5])
h=1e-6
for m in range(5):
	a[m]=h
	# Define your cosmology (what is not specified will be set to CLASS default parameters)
	params = {
	    'output': 'tCl lCl',
	    'l_max_scalars': 2508,
	    'lensing': 'yes',
	    'P_k_ini type': 'external_Pk',
	    'command': 'python /home/andrew/Research/tools/class_public-2.4.3/external_Pk/generate_Pk_cosines.py',
	    'custom1': a[0],
	    'custom2': a[1],
	    'custom3': a[2],
	    'custom4': a[3],
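The snippet above (truncated at both ends) perturbs each 'customN' amplitude of the external primordial spectrum by h = 1e-6 and differentiates the resulting lensed TT spectrum. A generic sketch of that forward-difference pattern, with a toy linear function standing in for the CLASS run:

import numpy as np

def forward_difference(f, a0, h=1e-6):
    """Column m of the result approximates d f / d a_m at a0."""
    f0 = np.asarray(f(a0))
    derivs = np.zeros((f0.size, a0.size))
    for m in range(a0.size):
        a = a0.copy()
        a[m] += h
        derivs[:, m] = (np.asarray(f(a)) - f0) / h
    return derivs

# Toy check: for a linear map the finite difference recovers the matrix exactly.
design = np.arange(12.0).reshape(4, 3)
dcls = forward_difference(lambda a: design @ a, np.zeros(3))
assert np.allclose(dcls, design)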
Example #25
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk,
    etc.).

    """

    @classmethod
    def setUpClass(self):
        self.faulty_figs_path = os.path.join(
            os.path.sep.join(os.path.realpath(__file__).split(os.path.sep)[:-1]), "faulty_figs"
        )

        if os.path.isdir(self.faulty_figs_path):
            shutil.rmtree(self.faulty_figs_path)

        os.mkdir(self.faulty_figs_path)

    @classmethod
    def tearDownClass(self):
        pass

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()
        self.cosmo_newt = Class()

        self.verbose = {
            "input_verbose": 1,
            "background_verbose": 1,
            "thermodynamics_verbose": 1,
            "perturbations_verbose": 1,
            "transfer_verbose": 1,
            "primordial_verbose": 1,
            "spectra_verbose": 1,
            "nonlinear_verbose": 1,
            "lensing_verbose": 1,
            "output_verbose": 1,
        }
        self.scenario = {}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        self.cosmo_newt.struct_cleanup()
        self.cosmo_newt.empty()
        del self.scenario

    def poormansname(self, somedict):
        string = "_".join([k + "=" + str(v) for k, v in somedict.items()])
        string = string.replace("/", "%")
        string = string.replace(",", "")
        string = string.replace(" ", "")
        return string

    @parameterized.expand(TUPLE_ARRAY)
    def test_0wrapper_implementation(self, inputdict):
        """Create a few instances based on different cosmologies"""
        self.scenario.update(inputdict)

        self.name = self.poormansname(inputdict)

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test case %s |\n" % self.name)
        sys.stderr.write("---------------------------------\n")
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
            sys.stdout.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set(dict(self.verbose, **self.scenario))
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_dict = {"tCl": ["tt"], "lCl": ["pp"], "pCl": ["ee", "bb"]}
        density_cl_list = ["nCl", "sCl"]

        # 'lensing' is always set to yes. Therefore, trying to compute 'tCl' or
        # 'pCl' will fail except if we also ask for 'lCl'. The flag
        # 'should_fail' stores this status.
        sys.stderr.write("Should")
        should_fail = self.test_incompatible_input()
        if should_fail:
            sys.stderr.write(" fail...\n")
        else:
            sys.stderr.write(" not fail...\n")

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(self.cosmo.state, "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print("--> Class is ready")
        # Depending on the requested output, run the corresponding checks
        if "output" in self.scenario.keys():
            # Positive tests of raw cls
            output = self.scenario["output"]
            for elem in output.split():
                if elem in cl_dict.keys():
                    for cl_type in cl_dict[elem]:
                        sys.stderr.write("--> testing raw_cl for %s\n" % cl_type)
                        cl = self.cosmo.raw_cl(100)
                        self.assertIsNotNone(cl, "raw_cl returned nothing")
                        self.assertEqual(np.shape(cl[cl_type])[0], 101, "raw_cl returned wrong size")
                    # TODO do the same for lensed if 'lCl' is there, and for
                    # density cl
                if elem == "mPk":
                    sys.stderr.write("--> testing pk function\n")
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_dict.keys() for elem in output.split()]):
                sys.stderr.write("--> testing absence of any Cl\n")
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if "mPk" not in output.split():
                sys.stderr.write("--> testing absence of mPk\n")
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

        if COMPARE_OUTPUT:
            # Now, compute with Newtonian gauge, and compare the results
            self.cosmo_newt.set(dict(self.verbose, **self.scenario))
            self.cosmo_newt.set({"gauge": "newtonian"})
            self.cosmo_newt.compute()
            # Check that the computation worked
            self.assertTrue(self.cosmo_newt.state, "Class failed to go through all __init__ methods in Newtonian gauge")

            self.compare_output(self.cosmo, self.cosmo_newt)

    def test_incompatible_input(self):

        should_fail = False

        # If we have tensor modes, we must have one tensor observable,
        # either tCl or pCl.
        if has_tensor(self.scenario):
            if "output" not in self.scenario.keys():
                should_fail = True
            else:
                output = self.scenario["output"].split()
                if "tCl" not in output and "pCl" not in output:
                    should_fail = True

        # If we have specified lensing, we must have lCl in output,
        # otherwise lensing will not be read (which is an error).
        if "lensing" in self.scenario.keys():
            if "output" not in self.scenario.keys():
                should_fail = True
            else:
                output = self.scenario["output"].split()
                if "lCl" not in output:
                    should_fail = True
                elif "tCl" not in output and "pCl" not in output:
                    should_fail = True

        # If we have specified a tensor method, we must have tensors.
        if "tensor method" in self.scenario.keys():
            if not has_tensor(self.scenario):
                should_fail = True

        # If we have specified non linear, we must have some form of
        # perturbations output.
        if "non linear" in self.scenario.keys():
            if "output" not in self.scenario.keys():
                should_fail = True

        # If we ask for Cl's of lensing potential, we must have scalar modes.
        if "output" in self.scenario.keys() and "lCl" in self.scenario["output"].split():
            if "modes" in self.scenario.keys() and self.scenario["modes"].find("s") == -1:
                should_fail = True

        # If we specify initial conditions (for scalar modes), we must have
        # perturbations and scalar modes.
        if "ic" in self.scenario.keys():
            if "modes" in self.scenario.keys() and self.scenario["modes"].find("s") == -1:
                should_fail = True
            if "output" not in self.scenario.keys():
                should_fail = True

        # If we use inflation module, we must have scalar modes,
        # tensor modes, no vector modes and we should only have adiabatic IC:
        if "P_k_ini type" in self.scenario.keys() and self.scenario["P_k_ini type"].find("inflation") != -1:
            if "modes" not in self.scenario.keys():
                should_fail = True
            else:
                if self.scenario["modes"].find("s") == -1:
                    should_fail = True
                if self.scenario["modes"].find("v") != -1:
                    should_fail = True
                if self.scenario["modes"].find("t") == -1:
                    should_fail = True
            if "ic" in self.scenario.keys() and self.scenario["ic"].find("i") != -1:
                should_fail = True

        return should_fail

    def compare_output(self, reference, candidate):
        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Comparing synch and Newt: |\n")
        sys.stderr.write("---------------------------------\n")

        for elem in ["raw_cl", "lensed_cl", "density_cl"]:
            # Try to get the elem, but if they were not computed, a
            # CosmoComputeError should be raised. In this case, ignore the
            # whole block.
            try:
                to_test = getattr(candidate, elem)()
            except CosmoSevereError:
                continue
            ref = getattr(reference, elem)()
            for key, value in ref.items():
                if key != "ell":
                    sys.stderr.write("--> testing equality of %s %s\n" % (elem, key))
                    # For all self spectra, try to compare allclose
                    if key[0] == key[1]:
                        # If it is a 'dd' or 'll', it is a dictionary.
                        if isinstance(value, dict):
                            for subkey in value.keys():
                                try:
                                    np.testing.assert_allclose(
                                        value[subkey], to_test[key][subkey], rtol=1e-03, atol=1e-20
                                    )
                                except AssertionError:
                                    self.cl_faulty_plot(elem + "_" + key, value[subkey][2:], to_test[key][subkey][2:])
                                except TypeError:
                                    self.cl_faulty_plot(elem + "_" + key, value[subkey][2:], to_test[key][subkey][2:])
                        else:
                            try:
                                np.testing.assert_allclose(value, to_test[key], rtol=1e-03, atol=1e-20)
                            except AssertionError:
                                self.cl_faulty_plot(elem + "_" + key, value[2:], to_test[key][2:])
                            except TypeError:
                                self.cl_faulty_plot(elem + "_" + key, value[2:], to_test[key][2:])
                    # For cross-spectra, as there can be zero-crossing, we
                    # instead compare the difference.
                    else:
                        # First, we multiply each array by the biggest value
                        norm = max(np.abs(value).max(), np.abs(to_test[key]).max())
                        value *= norm
                        to_test[key] *= norm
                        try:
                            np.testing.assert_array_almost_equal(value, to_test[key], decimal=3)
                        except AssertionError:
                            self.cl_faulty_plot(elem + "_" + key, value[2:], to_test[key][2:])

        if "output" in self.scenario.keys():
            if self.scenario["output"].find("mPk") != -1:
                sys.stderr.write("--> testing equality of Pk\n")
                k = np.logspace(-2, np.log10(self.scenario["P_k_max_1/Mpc"]))
                reference_pk = np.array([reference.pk(elem, 0) for elem in k])
                candidate_pk = np.array([candidate.pk(elem, 0) for elem in k])
                try:
                    np.testing.assert_allclose(reference_pk, candidate_pk, rtol=5e-03, atol=1e-20)
                except AssertionError:
                    self.pk_faulty_plot(k, reference_pk, candidate_pk)

    def cl_faulty_plot(self, cl_type, reference, candidate):
        path = os.path.join(self.faulty_figs_path, self.name)

        fig = plt.figure()
        ax_lin = plt.subplot(211)
        ax_log = plt.subplot(212)
        ell = np.arange(max(np.shape(candidate))) + 2
        ax_lin.plot(ell, 1 - candidate / reference)
        ax_log.loglog(ell, abs(1 - candidate / reference))

        ax_lin.set_xlabel("l")
        ax_log.set_xlabel("l")
        ax_lin.set_ylabel("1-candidate/reference")
        ax_log.set_ylabel("abs(1-candidate/reference)")

        ax_lin.set_title(self.name)
        ax_log.set_title(self.name)

        ax_lin.legend([cl_type])
        ax_log.legend([cl_type])

        fig.savefig(path + "_" + cl_type + ".pdf")

        # Store parameters (contained in self.scenario) to text file
        parameters = dict(self.verbose, **self.scenario)
        with open(path + ".ini", "w") as param_file:
            for key, value in parameters.items():
                param_file.write(key + " = " + str(value) + "\n")

    def pk_faulty_plot(self, k, reference, candidate):
        path = os.path.join(self.faulty_figs_path, self.name)

        fig = plt.figure()
        ax_lin = plt.subplot(211)
        ax_log = plt.subplot(212)
        ax_lin.plot(k, 1 - candidate / reference)
        ax_log.loglog(k, abs(1 - candidate / reference))

        ax_lin.set_xlabel("k")
        ax_log.set_xlabel("k")
        ax_lin.set_ylabel("1-candidate/reference")
        ax_log.set_ylabel("abs(1-candidate/reference)")

        ax_lin.set_title(self.name)
        ax_log.set_title(self.name)

        ax_lin.legend("$P_k$")
        ax_log.legend("$P_k$")

        fig.savefig(path + "_" + "pk" + ".pdf")

        # Store parameters (contained in self.scenario) to text file
        parameters = dict(self.verbose, **self.scenario)
        with open(path + ".ini", "w") as param_file:
            for key, value in parameters.items():
                param_file.write(key + " = " + str(value) + "\n")
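The test above calls a module-level has_tensor(scenario) helper that is not part of this excerpt. A plausible sketch, under the assumption that it only needs to detect whether tensor modes are switched on through the 'modes' or 'tensor method' keys (this is a guess at the missing helper, not its actual definition):

def has_tensor(scenario):
    """Guess whether an input dictionary activates tensor perturbations."""
    if 't' in scenario.get('modes', '').replace(' ', '').split(','):
        return True
    # Assumption: asking for a tensor method implies tensors as well.
    return 'tensor method' in scenario

assert has_tensor({'modes': 's, t'})
assert not has_tensor({'output': 'tCl'})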
Example #26
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    #{cosmoslik name : class name} - needed even for parameters whose CLASS name is identical, because only names present in this mapping are forwarded to self.model.set!
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'k_c':'k_c',
                    'alpha_exp':'alpha_exp',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    #'Tcmb':'T_cmb',
                    #'P_k_max_hinvMpc':'P_k_max_h/Mpc'
                    #'w':'w0_fld',
                    #'nrun':'alpha_s',
                    #'omk':'Omega_k',
                    #'l_max_scalar':'l_max_scalars',
                    #'l_max_tensor':'l_max_tensors'
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 k_c,
                 alpha_exp,
                 tau,
                 #omnuh2=0, #0.006  #None means that Class will take the default for this, maybe?
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 #massive_neutrinos=0,
                 massless_neutrinos=3.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.05,
                 outputs=[],
                 **kwargs):


        
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       **{self.name_mapping[k]:v for k,v in locals().items() 
                          if k in self.name_mapping and v is not None})
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #27
                   'tau_reio':0.0925,
                   # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
                   'YHe':0.246,
                   # other output and precision parameters
                   'l_max_scalars':5000}
###############
#
# call CLASS
#
M = Class()
M.set(common_settings)
M.compute()
cl_tot = M.raw_cl(3000)
cl_lensed = M.lensed_cl(3000)
M.struct_cleanup()  # clean output
M.empty()           # clean input
#
M.set(common_settings) # new input
M.set({'temperature contributions':'tsw'})
M.compute()
cl_tsw = M.raw_cl(3000)
M.struct_cleanup()
M.empty()
#
M.set(common_settings)
M.set({'temperature contributions':'eisw'})
M.compute()
cl_eisw = M.raw_cl(3000)
M.struct_cleanup()
M.empty()
#
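The run above isolates individual contributions to the temperature spectrum by recomputing with the same settings and a different 'temperature contributions' value each time. A compact sketch of the same loop; the settings dictionary is a minimal stand-in for the (truncated) common_settings above, and only the two tags that appear in this excerpt are used:

from classy import Class

def cl_by_contribution(common_settings, tags=('tsw', 'eisw'), lmax=3000):
    """Return {tag: raw TT spectrum} computed one contribution at a time."""
    results = {}
    M = Class()
    for tag in tags:
        M.set(common_settings)
        M.set({'temperature contributions': tag})
        M.compute()
        results[tag] = M.raw_cl(lmax)['tt']
        M.struct_cleanup()  # clean output between runs
        M.empty()           # clean input between runs
    return results

# Minimal, illustrative settings; the notebook above uses a richer dictionary.
# spectra = cl_by_contribution({'output': 'tCl', 'l_max_scalars': 3000})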
Example #28
class Sampler:
    def __init__(self, NSIDE, As):
        self.NSIDE = NSIDE
        self.Npix = 12 * NSIDE**2
        self.As = As
        print("Initialising sampler")
        self.cosmo = Class()
        #print("Maps")
        # To be re-enabled (commented out for now)
        #self.Qs, self.Us, self.sigma_Qs, self.sigma_Us = aggregate_by_pixels_params(get_pixels_params(self.NSIDE))
        #print("betas")
        self.matrix_mean, self.matrix_var = aggregate_mixing_params(
            get_mixing_matrix_params(self.NSIDE))
        print("Cosmo params")
        self.cosmo_means = np.array(COSMO_PARAMS_MEANS)
        self.cosmo_stdd = np.diag(COSMO_PARAMS_SIGMA)

        self.instrument = pysm.Instrument(
            get_instrument('litebird', self.NSIDE))
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)

        self.noise_covar_one_pix = self.noise_covariance_in_freq(self.NSIDE)
        # To be re-enabled (commented out for now)
        #self.noise_stdd_all = np.concatenate([np.sqrt(self.noise_covar_one_pix) for _ in range(2*self.Npix)])
        print("End of initialisation")

    def __getstate__(self):
        state_dict = self.__dict__.copy()
        del state_dict["mixing_matrix_evaluator"]
        del state_dict["cosmo"]
        del state_dict["mixing_matrix"]
        del state_dict["components"]
        return state_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.cosmo = Class()
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)

    def prepare_sigma(self, input):
        sampled_beta, i = input
        mixing_mat = list(self.sample_mixing_matrix_parallel(sampled_beta))
        mean = np.dot(mixing_mat, (self.Qs + self.Us)[i])
        sigma = np.diag(self.noise_covar_one_pix) + np.einsum(
            "ij,jk,lk", mixing_mat, (np.diag(
                (self.sigma_Qs + self.sigma_Us)[i])**2), mixing_mat)

        sigma_symm = (sigma + sigma.T) / 2
        log_det = np.log(scipy.linalg.det(2 * np.pi * sigma_symm))
        return mean, sigma_symm, log_det

    def sample_mixing_matrix_parallel(self, betas):
        return self.mixing_matrix_evaluator(betas)[:, 1:]

    def sample_normal(self, mu, stdd, diag=False):
        standard_normal = np.random.normal(0, 1, size=mu.shape[0])
        if diag:
            normal = np.multiply(stdd, standard_normal)
        else:
            normal = np.dot(stdd, standard_normal)

        normal += mu
        return normal

    def noise_covariance_in_freq(self, nside):
        cov = LiteBIRD_sensitivities**2 / hp.nside2resol(nside, arcmin=True)**2
        return cov

    def sample_model_parameters(self):
        #sampled_cosmo = self.sample_normal(self.cosmo_means, self.cosmo_stdd)
        sampled_cosmo = np.array(
            [0.9665, 0.02242, 0.11933, 1.04101, self.As, 0.0561])
        #sampled_beta = self.sample_normal(self.matrix_mean, self.matrix_var, diag = True).reshape((self.Npix, -1), order = "F")
        sampled_beta = self.matrix_mean.reshape((self.Npix, -1), order="F")
        return sampled_cosmo, sampled_beta

    def sample_CMB_QU(self, cosmo_params):
        params = {
            'output': OUTPUT_CLASS,
            'l_max_scalars': L_MAX_SCALARS,
            'lensing': LENSING
        }
        params.update(cosmo_params)
        print(params)
        self.cosmo.set(params)
        self.cosmo.compute()
        cls = self.cosmo.lensed_cl(L_MAX_SCALARS)
        eb_tb = np.zeros(shape=cls["tt"].shape)
        _, Q, U = hp.synfast(
            (cls['tt'], cls['ee'], cls['bb'], cls['te'], eb_tb, eb_tb),
            nside=self.NSIDE,
            new=True)
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        return Q, U

    def sample_mixing_matrix(self, betas):
        #mat_pixels = []
        #for i in range(self.Npix):
        #    m = self.mixing_matrix_evaluator(betas[i,:])[:, 1:]
        #    mat_pixels.append(m)

        mat_pixels = (self.mixing_matrix_evaluator(beta)[:, 1:]
                      for beta in betas)
        return mat_pixels

    def sample_mixing_matrix_full(self, betas):
        #mat_pixels = []
        #for i in range(self.Npix):
        #    m = self.mixing_matrix_evaluator(betas[i,:])
        #    mat_pixels.append(m)

        mat_pixels = (self.mixing_matrix_evaluator(beta) for beta in betas)
        return mat_pixels

    def sample_model(self, input_params):
        random_seed = input_params
        np.random.seed(random_seed)
        cosmo_params, _ = self.sample_model_parameters()
        cosmo_dict = {
            l[0]: l[1]
            for l in zip(COSMO_PARAMS_NAMES, cosmo_params.tolist())
        }
        tuple_QU = self.sample_CMB_QU(cosmo_dict)
        map_CMB = np.concatenate(tuple_QU)
        result = {"map_CMB": map_CMB, "cosmo_params": cosmo_params}
        with open("B3DCMB/data/temp" + str(random_seed), "wb") as f:
            pickle.dump(result, f)

        return cosmo_params

    def compute_weight(self, input):
        observed_data = config.sky_map
        noise_level, random_seed = input
        np.random.seed(random_seed)
        with open("B3DCMB/data/temp" + str(random_seed), "rb") as f:
            data = pickle.load(f)

        map_CMB = data["map_CMB"]
        print("Duplicating CMB")
        duplicate_CMB = (l for l in map_CMB for _ in range(15))
        print("Splitting for computation")
        # The problem is most likely that each row of X must be in Fortran order, which here is also C order!!!
        x = np.ascontiguousarray(
            (observed_data - np.array(list(duplicate_CMB)) -
             np.array(config.means)).reshape(self.Npix * 2, -1))
        print("Computing log weights")
        #r = np.sum((np.dot(l[1], scipy.linalg.solve(l[0], l[1].T)) for l in zip(config.sigmas_symm, x)))
        r = compute_exponent(config.sigmas_symm, x, 2 * self.Npix)
        lw = (-1 / 2) * r + config.denom
        return lw

    def sample_data(self):
        print("Sampling parameters")
        cosmo_params, sampled_beta = self.sample_model_parameters()
        print("Computing mean and cov of map")
        mean_map = np.array([i for l in self.Qs + self.Us for i in l])
        stdd_map = [i for l in self.sigma_Qs + self.sigma_Us for i in l]
        print("Sampling maps Dust and Sync")
        maps = self.sample_normal(mean_map, stdd_map, diag=True)
        print("Computing cosmo params")
        cosmo_dict = {
            l[0]: l[1]
            for l in zip(COSMO_PARAMS_NAMES, cosmo_params.tolist())
        }
        print("Sampling CMB signal")
        tuple_QU = self.sample_CMB_QU(cosmo_dict)
        map_CMB = np.concatenate(tuple_QU)
        print("Creating mixing matrix")
        mixing_matrix = self.sample_mixing_matrix(sampled_beta)
        print("Scaling to frequency maps")
        #freq_maps = np.dot(scipy.linalg.block_diag(*2*mixing_matrix), maps.T)
        freq_pixels = []
        mix1, mix2 = tee(mixing_matrix)
        for i, mat in enumerate(chain(mix1, mix2)):
            freq_pix = np.dot(mat, maps[2 * i:(2 * i + 2)].T)
            freq_pixels.append(freq_pix)

        freq_maps = np.concatenate(freq_pixels)
        print("Adding CMB to frequency maps")
        duplicated_cmb = np.repeat(map_CMB, 15)
        print("Creating noise")
        noise = self.sample_normal(np.zeros(2 * 15 * self.Npix),
                                   self.noise_stdd_all,
                                   diag=True)
        print("Adding noise to the maps")
        sky_map = np.add(np.add(freq_maps, duplicated_cmb), noise)
        #sky_map = np.add(freq_maps, duplicated_cmb)
        return {
            "sky_map": sky_map,
            "cosmo_params": cosmo_params,
            "betas": sampled_beta
        }
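sample_CMB_QU is the heart of the sampler: a parameter dictionary goes in, lensed spectra come out of CLASS, and healpy turns them into a Gaussian Q/U realisation. A stripped-down sketch of that step alone, assuming classy and healpy are installed; the NSIDE, l_max and output string below are chosen purely for illustration:

import numpy as np
import healpy as hp
from classy import Class

def sample_qu(cosmo_params, nside=16, lmax=256):
    """Draw one Gaussian Q/U realisation from the lensed spectra of cosmo_params."""
    cosmo = Class()
    cosmo.set({'output': 'tCl pCl lCl', 'lensing': 'yes', 'l_max_scalars': lmax})
    cosmo.set(cosmo_params)
    cosmo.compute()
    cls = cosmo.lensed_cl(lmax)
    zeros = np.zeros_like(cls['tt'])  # no TB/EB correlations
    _, Q, U = hp.synfast((cls['tt'], cls['ee'], cls['bb'], cls['te'], zeros, zeros),
                         nside=nside, new=True)
    cosmo.struct_cleanup()
    cosmo.empty()
    return Q, U

# Illustrative usage (parameter values are placeholders):
# Q, U = sample_qu({'omega_b': 0.02242, 'omega_cdm': 0.11933, 'H0': 67.66,
#                   'A_s': 2.1e-9, 'n_s': 0.9665, 'tau_reio': 0.0561})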
Example #29
class classy(BoltzmannBase):
    # Name of the Class repo/folder and version to download
    _classy_repo_name = "lesgourg/class_public"
    _min_classy_version = "v2.9.3"
    _classy_repo_version = os.environ.get('CLASSY_REPO_VERSION', _min_classy_version)

    def initialize(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # Allow global import if no direct path specification
        allow_global = not self.path
        if not self.path and self.packages_path:
            self.path = self.get_path(self.packages_path)
        self.classy_module = self.is_installed(path=self.path, allow_global=allow_global)
        if not self.classy_module:
            raise NotInstalledError(
                self.log, "Could not find CLASS. Check error message above.")
        from classy import Class, CosmoSevereError, CosmoComputationError
        global CosmoComputationError, CosmoSevereError
        self.classy = Class()
        super().initialize()
        # Add general CLASS stuff
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (
                self.extra_args["sBBN file"].format(classy=self.path))
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []
        self.log.info("Initialized!")

    def must_provide(self, **requirements):
        # Computed quantities required by the likelihood
        super().must_provide(**requirements)
        for k, v in self._must_provide.items():
            # Products and other computations
            if k == "Cl":
                if any(("t" in cl.lower()) for cl in v):
                    self.extra_args["output"] += " tCl"
                if any((("e" in cl.lower()) or ("b" in cl.lower())) for cl in v):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                # For l_max_scalars, remember previous entries.
                self.extra_args["l_max_scalars"] = max(v.values())
                self.collectors[k] = Collector(
                    method="lensed_cl", kwargs={"lmax": self.extra_args["l_max_scalars"]})
                if 'T_cmb' not in self.derived_extra:
                    self.derived_extra += ['T_cmb']
            elif k == "Hubble":
                self.collectors[k] = Collector(
                    method="Hubble",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k == "angular_diameter_distance":
                self.collectors[k] = Collector(
                    method="angular_distance",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k == "comoving_radial_distance":
                self.collectors[k] = Collector(
                    method="z_of_r",
                    args_names=["z"],
                    args=[np.atleast_1d(v["z"])])
            elif isinstance(k, tuple) and k[0] == "Pk_grid":
                self.extra_args["output"] += " mPk"
                v = deepcopy(v)
                self.add_P_k_max(v.pop("k_max"), units="1/Mpc")
                # NB: Actually, only the max z is used, and the actual sampling in z
                # for computing P(k,z) is controlled by `perturb_sampling_stepsize`
                # (default: 0.1). But let's leave it like this in case this changes
                # in the future.
                self.add_z_for_matter_power(v.pop("z"))

                if v["nonlinear"] and "non linear" not in self.extra_args:
                    self.extra_args["non linear"] = non_linear_default_code
                pair = k[2:]
                if pair == ("delta_tot", "delta_tot"):
                    v["only_clustering_species"] = False
                elif pair == ("delta_nonu", "delta_nonu"):
                    v["only_clustering_species"] = True
                else:
                    raise LoggedError(self.log, "NotImplemented in CLASS: %r", pair)
                self.collectors[k] = Collector(
                    method="get_pk_and_k_and_z",
                    kwargs=v,
                    post=(lambda P, kk, z: (kk, z, np.array(P).T)))
            elif isinstance(k, tuple) and k[0] == "sigma_R":
                raise LoggedError(
                    self.log, "Classy sigma_R not implemented as yet - use CAMB only")
            elif v is None:
                k_translated = self.translate_param(k)
                if k_translated not in self.derived_extra:
                    self.derived_extra += [k_translated]
            else:
                raise LoggedError(self.log, "Requested product not known: %r", {k: v})
        # Derived parameters (if some need some additional computations)
        if any(("sigma8" in s) for s in self.output_params or requirements):
            self.extra_args["output"] += " mPk"
            self.add_P_k_max(1, units="1/Mpc")
        # Adding tensor modes if requested
        if self.extra_args.get("r") or "r" in self.input_params:
            self.extra_args["modes"] = "s,t"
        # If B spectrum with l>50, or lensing, recommend using Halofit
        cls = self._must_provide.get("Cl", {})
        has_BB_l_gt_50 = (any(("b" in cl.lower()) for cl in cls) and
                          max(cls[cl] for cl in cls if "b" in cl.lower()) > 50)
        has_lensing = any(("p" in cl.lower()) for cl in cls)
        if (has_BB_l_gt_50 or has_lensing) and not self.extra_args.get("non linear"):
            self.log.warning("Requesting BB for ell>50 or lensing Cl's: "
                             "using a non-linear code is recommended (and you are not "
                             "using any). To activate it, set "
                             "'non_linear: halofit|hmcode|...' in classy's 'extra_args'.")
        # Cleanup of products string
        self.extra_args["output"] = " ".join(set(self.extra_args["output"].split()))
        self.check_no_repeated_input_extra()

    def add_z_for_matter_power(self, z):
        if getattr(self, "z_for_matter_power", None) is None:
            self.z_for_matter_power = np.empty(0)
        self.z_for_matter_power = np.flip(np.sort(np.unique(np.concatenate(
            [self.z_for_matter_power, np.atleast_1d(z)]))), axis=0)
        self.extra_args["z_pk"] = " ".join(["%g" % zi for zi in self.z_for_matter_power])

    def add_P_k_max(self, k_max, units):
        r"""
        Unifies treatment of :math:`k_\mathrm{max}` for matter power spectrum:
        ``P_k_max_[1|h]/Mpc``.

        Make ``units="1/Mpc"|"h/Mpc"``.
        """
        # Fiducial h conversion (high, though it may slow the computations)
        h_fid = 1
        if units == "h/Mpc":
            k_max *= h_fid
        # Take into account possible manual set of P_k_max_***h/Mpc*** through extra_args
        k_max_old = self.extra_args.pop(
            "P_k_max_1/Mpc", h_fid * self.extra_args.pop("P_k_max_h/Mpc", 0))
        self.extra_args["P_k_max_1/Mpc"] = max(k_max, k_max_old)

    def set(self, params_values_dict):
        # If no output requested, remove arguments that produce an error
        # (e.g. complaints if halofit requested but no Cl's computed.)
        # Needed for facilitating post-processing
        if not self.extra_args["output"]:
            for k in ["non linear"]:
                self.extra_args.pop(k, None)
        # Prepare parameters to be passed: this-iteration + extra
        args = {self.translate_param(p): v for p, v in params_values_dict.items()}
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.set(**args)

    def calculate(self, state, want_derived=True, **params_values_dict):
        # Set parameters
        self.set(params_values_dict)
        # Compute!
        try:
            self.classy.compute()
        # "Valid" failure of CLASS: parameters too extreme -> log and report
        except CosmoComputationError as e:
            if self.stop_at_error:
                self.log.error(
                    "Computation error (see traceback below)! "
                    "Parameters sent to CLASS: %r and %r.\n"
                    "To ignore this kind of error, make 'stop_at_error: False'.",
                    state["params"], dict(self.extra_args))
                raise
            else:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on. "
                               "The output of the CLASS error was %s" % e)
            return False
        # CLASS not correctly initialized, or input parameters not correct
        except CosmoSevereError:
            self.log.error("Serious error setting parameters or computing results. "
                           "The parameters passed were %r and %r. To see the original "
                           "CLASS' error traceback, make 'debug: True'.",
                           state["params"], self.extra_args)
            raise  # No LoggedError, so that CLASS traceback gets printed
        # Gather products
        for product, collector in self.collectors.items():
            # Special case: sigma8 needs H0, which cannot be known beforehand:
            if "sigma8" in self.collectors:
                self.collectors["sigma8"].args[0] = 8 / self.classy.h()
            method = getattr(self.classy, collector.method)
            arg_array = self.collectors[product].arg_array
            if arg_array is None:
                state[product] = method(
                    *self.collectors[product].args, **self.collectors[product].kwargs)
            elif isinstance(arg_array, int):
                state[product] = np.zeros(
                    len(self.collectors[product].args[arg_array]))
                for i, v in enumerate(self.collectors[product].args[arg_array]):
                    args = (list(self.collectors[product].args[:arg_array]) + [v] +
                            list(self.collectors[product].args[arg_array + 1:]))
                    state[product][i] = method(
                        *args, **self.collectors[product].kwargs)
            elif arg_array in self.collectors[product].kwargs:
                value = np.atleast_1d(self.collectors[product].kwargs[arg_array])
                state[product] = np.zeros(value.shape)
                for i, v in enumerate(value):
                    kwargs = deepcopy(self.collectors[product].kwargs)
                    kwargs[arg_array] = v
                    state[product][i] = method(
                        *self.collectors[product].args, **kwargs)
            if collector.post:
                state[product] = collector.post(*state[product])
        # Prepare derived parameters
        d, d_extra = self._get_derived_all(derived_requested=want_derived)
        if want_derived:
            state["derived"] = {p: d.get(p) for p in self.output_params}
            # Prepare necessary extra derived parameters
        state["derived_extra"] = deepcopy(d_extra)

    def _get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``compute`` method).

        Parameter names are returned in CLASS nomenclature.

        To get a parameter *from a likelihood* use `get_param` instead.
        """
        # TODO: fails with derived_requested=False
        # Put all parameters in CLASS nomenclature (self.derived_extra already is)
        requested = [self.translate_param(p) for p in (
            self.output_params if derived_requested else [])]
        requested_and_extra = dict.fromkeys(set(requested).union(set(self.derived_extra)))
        # Parameters with their own getters
        if "rs_drag" in requested_and_extra:
            requested_and_extra["rs_drag"] = self.classy.rs_drag()
        if "Omega_nu" in requested_and_extra:
            requested_and_extra["Omega_nu"] = self.classy.Omega_nu
        if "T_cmb" in requested_and_extra:
            requested_and_extra["T_cmb"] = self.classy.T_cmb()
        # Get the rest using the general derived param getter
        # No need for error control: classy.get_current_derived_parameters is passed
        # every derived parameter not excluded before, and cause an error, indicating
        # which parameters are not recognized
        requested_and_extra.update(
            self.classy.get_current_derived_parameters(
                [p for p, v in requested_and_extra.items() if v is None]))
        # Separate the parameters before returning
        # Remember: self.output_params is in sampler nomenclature,
        # but self.derived_extra is in CLASS
        derived = {
            p: requested_and_extra[self.translate_param(p)] for p in self.output_params}
        derived_extra = {p: requested_and_extra[p] for p in self.derived_extra}
        return derived, derived_extra

    def get_Cl(self, ell_factor=False, units="FIRASmuK2"):
        try:
            cls = deepcopy(self._current_state["Cl"])
        except:
            raise LoggedError(
                self.log,
                "No Cl's were computed. Are you sure that you have requested them?")
        # unit conversion and ell_factor
        ells_factor = ((cls["ell"] + 1) * cls["ell"] / (2 * np.pi))[
                      2:] if ell_factor else 1
        units_factor = self._cmb_unit_factor(
            units, self._current_state['derived_extra']['T_cmb'])

        for cl in cls:
            if cl not in ['pp', 'ell']:
                cls[cl][2:] *= units_factor ** 2 * ells_factor
        if "pp" in cls and ell_factor:
            cls['pp'][2:] *= ells_factor ** 2 * (2 * np.pi)
        return cls

    def _get_z_dependent(self, quantity, z):
        try:
            z_name = next(k for k in ["redshifts", "z"]
                          if k in self.collectors[quantity].kwargs)
            computed_redshifts = self.collectors[quantity].kwargs[z_name]
        except StopIteration:
            computed_redshifts = self.collectors[quantity].args[
                self.collectors[quantity].args_names.index("z")]
        i_kwarg_z = np.concatenate(
            [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)])
        values = np.array(deepcopy(self._current_state[quantity]))
        if quantity == "comoving_radial_distance":
            values = values[0]
        return values[i_kwarg_z]

    def close(self):
        self.classy.empty()

    def get_can_provide_params(self):
        names = ['Omega_Lambda', 'Omega_cdm', 'Omega_b', 'Omega_m', 'rs_drag', 'z_reio',
                 'YHe', 'Omega_k', 'age', 'sigma8']
        for name, mapped in self.renames.items():
            if mapped in names:
                names.append(name)
        return names

    def get_version(self):
        return getattr(self.classy_module, '__version__', None)

    # Installation routines

    @classmethod
    def get_path(cls, path):
        return os.path.realpath(os.path.join(path, "code", cls.__name__))

    @classmethod
    def get_import_path(cls, path):
        log = logging.getLogger(cls.__name__)
        classy_build_path = os.path.join(path, "python", "build")
        if not os.path.isdir(classy_build_path):
            log.error("Either CLASS is not in the given folder, "
                      "'%s', or you have not compiled it.", path)
            return None
        py_version = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
        try:
            post = next(d for d in os.listdir(classy_build_path)
                        if (d.startswith("lib.") and py_version in d))
        except StopIteration:
            log.error("The CLASS installation at '%s' has not been compiled for the "
                      "current Python version.", path)
            return None
        return os.path.join(classy_build_path, post)

    @classmethod
    def is_compatible(cls):
        import platform
        if platform.system() == "Windows":
            return False
        return True

    @classmethod
    def is_installed(cls, **kwargs):
        log = logging.getLogger(cls.__name__)
        if not kwargs.get("code", True):
            return True
        path = kwargs["path"]
        if path is not None and path.lower() == "global":
            path = None
        if path and not kwargs.get("allow_global"):
            log.info("Importing *local* CLASS from '%s'.", path)
            if not os.path.exists(path):
                log.error("The given folder does not exist: '%s'", path)
                return False
            classy_build_path = cls.get_import_path(path)
            if not classy_build_path:
                return False
        elif not path:
            log.info("Importing *global* CLASS.")
            classy_build_path = None
        else:
            log.info("Importing *auto-installed* CLASS (but defaulting to *global*).")
            classy_build_path = cls.get_import_path(path)
        try:
            return load_module(
                'classy', path=classy_build_path, min_version=cls._classy_repo_version)
        except ImportError:
            if path is not None and path.lower() != "global":
                log.error("Couldn't find the CLASS python interface at '%s'. "
                          "Are you sure it has been installed there?", path)
            else:
                log.error("Could not import global CLASS installation. "
                          "Specify a Cobaya or CLASS installation path, "
                          "or install the CLASS Python interface globally with "
                          "'cd /path/to/class/python/ ; python setup.py install'")
            return False
        except VersionCheckError as e:
            log.error(str(e))
            return False

    @classmethod
    def install(cls, path=None, force=False, code=True, no_progress_bars=False, **kwargs):
        log = logging.getLogger(cls.__name__)
        if not code:
            log.info("Code not requested. Nothing to do.")
            return True
        log.info("Installing pre-requisites...")
        exit_status = pip_install("cython")
        if exit_status:
            log.error("Could not install pre-requisite: cython")
            return False
        log.info("Downloading classy...")
        success = download_github_release(
            os.path.join(path, "code"), cls._classy_repo_name, cls._classy_repo_version,
            repo_rename=cls.__name__, no_progress_bars=no_progress_bars, logger=log)
        if not success:
            log.error("Could not download classy.")
            return False
        classy_path = cls.get_path(path)
        log.info("Compiling classy...")
        from subprocess import Popen, PIPE
        env = deepcopy(os.environ)
        env.update({"PYTHON": sys.executable})
        process_make = Popen(["make"], cwd=classy_path, stdout=PIPE, stderr=PIPE, env=env)
        out, err = process_make.communicate()
        if process_make.returncode:
            log.info(out)
            log.info(err)
            log.error("Compilation failed!")
            return False
        return True
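add_P_k_max above keeps a single 'P_k_max_1/Mpc' entry in extra_args, taking the maximum over possibly repeated requests in either unit (with h_fid = 1). The same bookkeeping on a plain dictionary, as a standalone sketch:

def add_p_k_max(extra_args, k_max, units, h_fid=1.0):
    """Store max(k_max, previous request) under 'P_k_max_1/Mpc', absorbing h/Mpc entries."""
    if units == "h/Mpc":
        k_max *= h_fid
    k_max_old = extra_args.pop(
        "P_k_max_1/Mpc", h_fid * extra_args.pop("P_k_max_h/Mpc", 0))
    extra_args["P_k_max_1/Mpc"] = max(k_max, k_max_old)

args = {"P_k_max_h/Mpc": 5}
add_p_k_max(args, 2, units="1/Mpc")
assert args == {"P_k_max_1/Mpc": 5}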
Example #30
def constraints(params, zs):
    cosmo = Class()
    cosmo.set(params)
    cosmo.compute()
    h = cosmo.Hubble(0) * 299792.458
    om0 = cosmo.Omega0_m()
    #print(cosmo.pars)
    zarr2 = cosmo.get_background().get('z')
    hz = cosmo.get_background().get('H [1/Mpc]')
    hf = interpolate.InterpolatedUnivariateSpline(zarr2[-1:0:-1], hz[-1:0:-1])
    chiz = cosmo.get_background().get('comov. dist.')
    chif = interpolate.InterpolatedUnivariateSpline(zarr2[-1:0:-1],
                                                    chiz[-1:0:-1])

    pkz = np.zeros((nk, nz))
    pklz2 = np.zeros((nk, nz))
    dprime = np.zeros((nk, 1))
    zarr = np.linspace(0, zmax, nz)
    karr = np.logspace(-3, np.log10(20), nk)
    rcollarr = np.zeros(nz)
    kcarr = np.zeros(nz)
    delz = 0.01

    for i in np.arange(nz):
        Dz = (cosmo.scale_independent_growth_factor(zarr[i]) /
              cosmo.scale_independent_growth_factor(0))
        sigz = lambda x: Dz * cosmo.sigma(x, zarr[i]) - 1.192182033080519
        if (sigz(1e-5) > 0) & (sigz(10) < 0):
            rcollarr[i] = optimize.brentq(sigz, 1e-5, 10)
        else:
            rcollarr[i] = 0
        for j in np.arange(nk):
            pkz[j, i] = cosmo.pk(karr[j], zarr[i])
    for i in np.arange(nk):
        pklz0 = np.log(cosmo.pk(karr[i], zs - delz) / cosmo.pk(karr[i], 0))
        pklz1 = np.log(cosmo.pk(karr[i], zs + delz) / cosmo.pk(karr[i], 0))
        pklz2[i] = cosmo.pk(karr[i], 0)
        dprime[i] = -hf(zs) * np.sqrt(
            cosmo.pk(karr[i], zs) / pklz2[i, 0]) * (pklz1 - pklz0) / 4 / delz
        #divided by 2 for step size, another for defining D'

    w0 = params.get('w0_fld')
    wa = params.get('wa_fld')
    mt = 5 * np.log10(cosmo.luminosity_distance(zs))
    #mt = 5*np.log10(fanal.dlatz(zs, om0, og0, w0, wa))
    Rc = (2 * 4.302e-9 * Mc / h**2 / om0)**(1 / 3)
    mask = (0 < rcollarr) & (rcollarr < Rc)
    kcarr[mask] = 2 * np.pi / rcollarr[mask]
    mask = (rcollarr >= Rc)
    kcarr[mask] = 2 * np.pi / Rc
    #plt.semilogy(zarr, kcarr)
    pksmooth = pdf.pkint(karr, zarr, pkz, kcarr)

    par2 = {'w0': w0, 'wa': wa, 'Omega_m': om0}
    print(par2)
    #kmin = conv.kmin(zs, chif, hf)*(-3./2.*hf(0)**2*om0)
    kvar = conv.kvar(zs, pksmooth, chif, hf) * (3. / 2. * hf(0)**2 * om0)**2
    #sigln = np.log(1+kvar/np.abs(kmin)**2)
    #L = pdf.convpdf(kmin, sigln, sig, mfid-mt)
    #sigln = np.sqrt(sig**2+(5/np.log(10))**2*kvar)
    #L = pdf.gausspdf(sig, mfid-mt)
    #lnL = mt/sig
    vvar = np.trapz(pklz2[:, 0] * dprime[:, 0]**2,
                    karr) / 6 / np.pi**2 * (1 -
                                            (1 + zs) / hf(zs) / chif(zs))**2
    #var_tot = norm*kvar+sig**2
    lnL = -mt**2 / 2
    print('Sigmasq = {}, {}, Likelihood = {}'.format(kvar, vvar, lnL))
    cosmo.struct_cleanup()
    cosmo.empty()
    return [mt, kvar, vvar]


#[mt, var_tot]
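
For reference, a minimal sketch of how constraints() might be driven. The parameter values are illustrative only; the module-level globals nk, nz, zmax and Mc, as well as the helper modules pdf and conv, must already be defined as in the original script, and setting w0_fld/wa_fld requires Omega_Lambda to be set to 0 in CLASS.

example_params = {
    'output': 'mPk',
    'P_k_max_1/Mpc': 25,     # must cover the karr grid (up to k = 20/Mpc)
    'z_max_pk': zmax,        # must cover the zarr grid
    'Omega_Lambda': 0,       # replace Lambda by the w0/wa fluid
    'w0_fld': -0.95,
    'wa_fld': 0.1,
    'h': 0.67,
    'omega_b': 0.0224,
    'omega_cdm': 0.12,
}
mt, kvar, vvar = constraints(example_params, zs=0.5)
print(mt, kvar, vvar)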
Example #31
    'lensing': 'no',
    'n_s': 0.9660499,
    'l_max_scalars': l_max_scalars
})
M.compute()
cls = M.raw_cl(l_max_scalars)

# In[ ]:

###############
#
# call CLASS : tensors only
#
###############
#
M.empty()  # reset input parameters to default, before passing a new parameter set
M.set(common_settings)
M.set({
    'output': 'tCl,pCl',
    'modes': 't',
    'lensing': 'no',
    'r': 0.1,
    'n_t': 0,
    'l_max_tensors': l_max_tensors
})
M.compute()
clt = M.raw_cl(l_max_tensors)

# In[ ]:

###############
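
With both runs in hand, the scalar and tensor spectra returned by raw_cl can be compared directly. A minimal plotting sketch, assuming matplotlib and numpy are available and cls/clt as computed above:

import numpy as np
import matplotlib.pyplot as plt

ell_s = cls['ell'][2:]
ell_t = clt['ell'][2:]

plt.loglog(ell_s, ell_s * (ell_s + 1) * cls['tt'][2:] / (2. * np.pi),
           label='scalars')
plt.loglog(ell_t, ell_t * (ell_t + 1) * clt['tt'][2:] / (2. * np.pi),
           label='tensors (r=0.1)')
plt.xlabel(r'$\ell$')
plt.ylabel(r'$\ell(\ell+1)C_\ell^{TT}/2\pi$')
plt.legend()
plt.show()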
Example #32
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk,
    etc.)

    """

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()

        self.verbose = {
            'background_verbose': 1,
            'thermodynamics_verbose': 1,
            'perturbations_verbose': 1,
            'transfer_verbose': 1,
            'primordial_verbose': 1,
            'spectra_verbose': 1,
            'nonlinear_verbose': 1,
            'lensing_verbose': 1,
            'output_verbose': 1}
        self.scenario = {'lensing':'yes'}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        del self.scenario

    @parameterized.expand(
        itertools.product(
            ('LCDM',
             'Mnu',
             'Positive_Omega_k',
             'Negative_Omega_k',
             'Isocurvature_modes', ),
            ({'output': ''}, {'output': 'mPk'}, {'output': 'tCl'},
             {'output': 'tCl pCl lCl'}, {'output': 'mPk tCl lCl', 'P_k_max_h/Mpc':10},
             {'output': 'nCl sCl'}, {'output': 'tCl pCl lCl nCl sCl'}),
            ({'gauge': 'newtonian'}, {'gauge': 'sync'}),
            ({}, {'non linear': 'halofit'})))
    def test_parameters(self, name, scenario, gauge, nonlinear):
        """Create a few instances based on different cosmologies"""
        if name == 'Mnu':
            self.scenario.update({'N_ncdm': 1, 'm_ncdm': 0.06})
        elif name == 'Positive_Omega_k':
            self.scenario.update({'Omega_k': 0.01})
        elif name == 'Negative_Omega_k':
            self.scenario.update({'Omega_k': -0.01})
        elif name == 'Isocurvature_modes':
            self.scenario.update({'ic': 'ad,nid,cdi', 'c_ad_cdi': -0.5})

        self.scenario.update(scenario)
        if scenario != {}:
            self.scenario.update(gauge)
        self.scenario.update(nonlinear)

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test case %s |\n' % name)
        sys.stderr.write('---------------------------------\n')
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set(
            dict(list(self.verbose.items()) + list(self.scenario.items())))
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_list = ['tCl', 'lCl', 'pCl', 'nCl', 'sCl']

        # Depending on the cases, the compute should fail or not
        should_fail = True
        output = self.scenario['output'].split()
        for elem in output:
            if elem in ['tCl', 'pCl']:
                for elem2 in output:
                    if elem2 == 'lCl':
                        should_fail = False
                        break

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(
            self.cosmo.state,
            "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print('--> Class is ready')
        # Depending
        if 'output' in self.scenario.keys():
            # Positive tests
            output = self.scenario['output']
            for elem in output.split():
                if elem in cl_list:
                    print('--> testing raw_cl function')
                    cl = self.cosmo.raw_cl(100)
                    self.assertIsNotNone(cl, "raw_cl returned nothing")
                    self.assertEqual(
                        np.shape(cl['tt'])[0], 101,
                        "raw_cl returned wrong size")
                if elem == 'mPk':
                    print('--> testing pk function')
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_list for elem in output.split()]):
                print('--> testing absence of any Cl')
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if 'mPk' not in self.scenario['output'].split():
                print('--> testing absence of mPk')
                #args = (0.1, 0)
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

        print('~~~~~~~~ passed ? ')

    @parameterized.expand(
        itertools.product(
            ('massless', 'massive', 'both'),
            ('photons', 'massless', 'exact'),
            ('t', 's, t')))
    def test_tensors(self, scenario, method, modes):
        """Test the new tensor mode implementation"""
        self.scenario = {}
        if scenario == 'massless':
            self.scenario.update({'N_eff': 3.046, 'N_ncdm':0})
        elif scenario == 'massive':
            self.scenario.update(
                {'N_eff': 0, 'N_ncdm': 2, 'm_ncdm': '0.03, 0.04',
                 'deg_ncdm': '2, 1'})
        elif scenario == 'both':
            self.scenario.update(
                {'N_eff': 1.5, 'N_ncdm': 2, 'm_ncdm': '0.03, 0.04',
                 'deg_ncdm': '1, 0.5'})

        self.scenario.update({
            'tensor method': method, 'modes': modes,
            'output': 'tCl, pCl'})
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")
        self.cosmo.set(
            dict(list(self.verbose.items()) + list(self.scenario.items())))
        self.cosmo.compute()
Example #33
#
# scalars only
#
M = Class()
M.set(common_settings)
M.set({
    'output': 'tCl,pCl',
    'modes': 's',
    'lensing': 'no',
    'n_s': 0.9619,
    'l_max_scalars': 3000
})
M.compute()
cls = M.raw_cl(3000)
M.struct_cleanup()
M.empty()
#
# tensors only
#
M = Class()
M.set(common_settings)
l_max_tensors = 600
M.set({
    'output': 'tCl,pCl',
    'modes': 't',
    'lensing': 'no',
    'r': 0.1,
    'n_t': 0,
    'l_max_tensors': l_max_tensors
})
# for l_max=600 we can keep default precision
Example #34
    'gauge': 'newtonian'
}
###############
#
# call CLASS a first time just to compute z_rec (will compute transfer functions at default: z=0)
#
M = Class()
M.set(common_settings)
M.compute()
derived = M.get_current_derived_parameters(
    ['z_rec', 'tau_rec', 'conformal_age'])
# print(derived.keys())
z_rec = derived['z_rec']
z_rec = int(1000. * z_rec) / 1000.  # round down to 3 digits after the decimal point
M.struct_cleanup()  # clean output
M.empty()  # clean input
#
# call CLASS again (will compute transfer functions at input value z_rec)
#
M = Class()
M.set(common_settings)
M.set({'z_pk': z_rec})
M.compute()
#
# load transfer functions at recombination
#
one_time = M.get_transfer(z_rec)
print(one_time.keys())
k = one_time['k (h/Mpc)']
Theta0 = 0.25 * one_time['d_g']
phi = one_time['phi']
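
A common check on these transfer functions is simply to plot them against k: the acoustic oscillations in the photon monopole and the near-constancy of the metric perturbation on large scales are visible immediately. A minimal sketch, assuming matplotlib is available:

import matplotlib.pyplot as plt

plt.semilogx(k, Theta0, label=r'$\Theta_0 = \delta_\gamma/4$')
plt.semilogx(k, phi, label=r'$\phi$')
plt.xlabel(r'$k \; [h/\mathrm{Mpc}]$')
plt.ylabel(r'transfer functions at $z_{\rm rec}$')
plt.legend()
plt.show()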
Example #35
    'N_eff': Neff_fid,
    'z_max_pk': 2,
    'k_per_decade_for_pk': 400,
    'k_per_decade_for_bao': 800,
    'P_k_max_1/Mpc': 1
}

LCDM_cosmo_class = Class()
LCDM_cosmo_class.set(common_settings)
LCDM_cosmo_class.compute()

kmin, kmax, nk = 1e-4, 1e0, 128
k = np.logspace(np.log10(kmin), np.log10(kmax), nk)
pk_class = np.array([LCDM_cosmo_class.pk(_k, z_effective) for _k in k])
LCDM_cosmo_class.struct_cleanup()
LCDM_cosmo_class.empty()

fclass1 = interpolate.interp1d(np.log(k),
                               np.log(pk_class),
                               fill_value="extrapolate",
                               kind='linear')
k_interp = np.logspace(np.log10(1e-3), np.log10(1e2), 70000)
u_interp = np.log(k_interp)
mPu_interp = fclass1(u_interp)
mPk_interp = np.exp(mPu_interp)

pk_hat = np.exp(savgol_filter(np.log(pk_class), 61, 3))
fclass2 = interpolate.interp1d(np.log(k),
                               np.log(pk_hat),
                               fill_value="extrapolate",
                               kind='linear')
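
The ratio of the full linear spectrum to its Savitzky-Golay-smoothed version isolates the BAO wiggles as a few-percent oscillation around unity. A minimal sketch of this diagnostic, assuming matplotlib is available and k, pk_class, pk_hat as computed above:

import matplotlib.pyplot as plt

wiggles = pk_class / pk_hat
plt.semilogx(k, wiggles)
plt.axhline(1.0, color='k', lw=0.5)
plt.xlabel(r'$k$')
plt.ylabel(r'$P(k) / P_{\rm smooth}(k)$')
plt.show()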
Example #36
    "transfer_neglect_delta_k_T_e": 100.,
    "transfer_neglect_delta_k_T_b": 100.,
    "neglect_CMB_sources_below_visibility": 1.e-30,
    "transfer_neglect_late_source": 3000.
    #     'm_ncdm' : 0.06
}
cosmo = Class()
cosmo.set(params)

cosmo.compute()
cls = cosmo.lensed_cl(ell_max)
ttCLASS = cls['ell'] * (cls['ell'] + 1) * cls['tt'] * (1e6 *
                                                       2.7255)**2 / (2 * np.pi)

cosmo.struct_cleanup()
cosmo.empty()

##################################################################
######################## HIGH ELL TT #############################
##################################################################

plt.plot((ttCLASS[:ell_max + 1] - ttCAMB[:ell_max + 1]) / ttCAMB[:ell_max + 1],
         '-')
plt.xlabel(r'$\ell$')
plt.ylabel(r'$\Delta C_{\ell}^{TT} / C_{\ell}^{TT}$')
plt.title('TT Comparison of CAMB and CLASS')

########## P(k) #########################

pars.set_matter_power(redshifts=[0.], kmax=2.0)
pars.NonLinear = model.NonLinear_none
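
On the CAMB side, the linear matter power spectrum can then be obtained from the results object and compared with the CLASS prediction for the same cosmology. A hedged sketch, assuming pars is the CAMBparams object configured earlier in this script:

import camb

results = camb.get_results(pars)
kh_camb, z_camb, pk_camb = results.get_matter_power_spectrum(minkh=1e-4,
                                                             maxkh=2.0,
                                                             npoints=200)
# Note the unit conventions: CAMB returns k in h/Mpc and P(k) in (Mpc/h)^3,
# while classy's cosmo.pk(k, z) takes k in 1/Mpc and returns P(k) in Mpc^3,
# so factors of h must be applied before taking a ratio.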
Example #37
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    #{cosmoslik name : class name} - This needs to be done even for variables with the same name (because of for loop in self.model.set)!
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'phi0':'custom1',
                    'm6':'custom2',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 phi0,
                 m6,
                 tau,
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 massless_neutrinos=3.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.05,
                 outputs=[],
                 **kwargs):

        d = {self.name_mapping[k]: v for k, v in locals().items()
             if k in self.name_mapping and v is not None}
        d['P_k_ini type']='external_Pk'
        d['modes'] = 's,t'
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       command = '../LSODAtesnors/pk',
                       **d)
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #38
class classy(SlikPlugin):
    """
    Compute the CMB power spectrum with CLASS.

    Based on work by: Brent Follin, Teresa Hamill
    """

    #{cosmoslik name : class name}
    name_mapping = {
        'As': 'A_s',
        'lmax': 'l_max_scalars',
        'mnu': 'm_ncdm',
        'Neff': 'N_ncdm',
        'ns': 'n_s',
        'nt': 'n_t',
        'ombh2': 'omega_b',
        'omch2': 'omega_cdm',
        'omk': 'Omega_k',
        'pivot_scalar': 'k_pivot',
        'r': 'r',
        'tau': 'tau_reio',
        'Tcmb': 'T_cmb',
        'Yp': 'YHe',
    }

    def __init__(self, **defaults):
        super().__init__()
        from classy import Class
        self.model = Class()
        self.defaults = defaults

    def convert_params(self, **params):
        """
        Convert from CosmoSlik params to CLASS
        """
        params = {self.name_mapping.get(k, k): v for k, v in params.items()}
        if 'theta' in params:
            params['100*theta_s'] = 100 * params.pop('theta')
        params['lensing'] = 'yes' if params.pop('DoLensing', True) else 'no'
        return params

    def __call__(self,
                 As=None,
                 DoLensing=True,
                 H0=None,
                 lmax=None,
                 mnu=None,
                 Neff=None,
                 nrun=None,
                 ns=None,
                 ombh2=None,
                 omch2=None,
                 omk=None,
                 output='tCl, lCl, pCl',
                 pivot_scalar=None,
                 r=None,
                 tau=None,
                 Tcmb=2.7255,
                 theta=None,
                 w=None,
                 Yp=None,
                 nowarn=False,
                 **kwargs):

        if not nowarn and kwargs:
            print('Warning: passing unknown parameters to CLASS: ' +
                  str(kwargs) + ' (set nowarn=True to turn off this message.)')

        params = dict(
            self.defaults, **{
                k: v
                for k, v in arguments(include_kwargs=False,
                                      exclude=["nowarn"]).items()
                if v is not None
            })
        self.model.set(self.convert_params(**params))
        self.model.compute()

        lmax = params['lmax']
        ell = arange(lmax + 1)
        if params['DoLensing'] == True:
            self.cmb_result = {
                x: (self.model.lensed_cl(lmax)[x.lower()]) * Tcmb**2 * 1e12 *
                ell * (ell + 1) / 2 / pi
                for x in ['TT', 'TE', 'EE', 'BB', 'PP', 'TP']
            }
        else:
            self.cmb_result = {
                x: (self.model.raw_cl(lmax)[x.lower()]) * Tcmb**2 * 1e12 *
                ell * (ell + 1) / 2 / pi
                for x in ['TT']
            }

        self.model.struct_cleanup()
        self.model.empty()

        return self.cmb_result

    def get_bao_observables(self, z):
        return {
            'H':
            self.model.Hubble(z),
            'D_A':
            self.model.angular_distance(z),
            'c':
            1.0,
            'r_d':
            (self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']
        }
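
A hedged sketch of how this plugin might be called directly, outside a full CosmoSlik chain. The parameter values are illustrative (roughly Planck-like), and CosmoSlik plus the classy wrapper must be installed:

plugin = classy()
cmb = plugin(ombh2=0.02237, omch2=0.1200, H0=67.36, As=2.1e-9,
             ns=0.9649, tau=0.0544, lmax=2000)
# cmb['TT'], cmb['TE'], ... hold l(l+1)C_l/2pi in muK^2 up to lmax
bao = plugin.get_bao_observables(z=0.57)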
Example #39
class Binning():
    def __init__(self, fname, outdir='./'):
        self._cosmo = Class()
        self._fname = fname
        self._outdir = outdir
        self._set_default_values()

    def _set_full_filenames(self, filesuffixes):
        """
        Return a list with the full output file names, incrementing their
        numeric suffix if files with that number already exist.

        Additionally, set the full file names of self._fparamsname and
        self._fshootname.
        """
        fullfilenames = []
        for suffix in filesuffixes + ['params', 'shooting']:
            fullfilenames.append(
                os.path.join(self._outdir, self._fname + '-' + suffix))

        i = 0

        bools = [True] * len(fullfilenames)

        while 1:
            for n, f in enumerate(fullfilenames):
                bools[n] = os.path.exists(f + '-%s.txt' % i)

            if True not in bools:
                break

            i += 1

        self._fparamsname = fullfilenames[-2] + '-%s.txt' % i
        self._fshootname = fullfilenames[-1] + '-%s.txt' % i

        return [f + '-%s.txt' % i for f in fullfilenames[:-2]]

    def _set_default_values(self):
        """
        Set default values of parameters lists.
        """
        self.params_smg = []
        self.params_2_smg = []
        self.h = []
        self.Omega_cdm = []

        self.gravity_model = []

        self._params = {
            "Omega_Lambda": 0,
            "Omega_fld": 0,
            "Omega_smg": -1,
            'output': 'mPk',  #
            'z_max_pk': 1000
        }  # Added for relative errors in f.

        self._computed = False
        self._path = []
        self._binType = ''

    def set_Pade(self,
                 n_num,
                 m_den,
                 xvar='a',
                 xReverse=False,
                 accuracy=1e-3,
                 increase=False,
                 maxfev=0):
        """
        Set the Pade polynomial orders, the temporal variable and its ordering.
        """
        self.reset()
        self._PadeOrder = [n_num, m_den]
        self._Pade_xvar = xvar
        self._Pade_xReverse = xReverse
        self._Pade_maxfev = maxfev
        self._Pade_increase = increase
        self._Pade_accuracy = accuracy
        self._binType = 'Pade'

    def set_fit(self,
                fit_function,
                n_coeffs,
                variable_to_fit,
                fit_function_label='',
                z_max_pk=1000,
                bounds=(-np.inf, np.inf),
                p0=[],
                xvar='ln(1+z)'):
        """
        Set the fitting_function and the number of coefficients.

        variable_to_fit must be one of 'F', 'w', 'logRho', 'logX' or 'X'.

        fit_function_label will be written in the header of fit files.
        """
        self.reset()
        self._fit_function = fit_function
        self._n_coeffs = n_coeffs
        self._list_variables_to_fit = ['F', 'w', 'logRho', 'logX', 'X']
        if variable_to_fit in self._list_variables_to_fit:
            self._variable_to_fit = variable_to_fit
        else:
            raise ValueError('variable_to_fit must be one of {}'.format(
                self._list_variables_to_fit))

        self._fit_function_label = fit_function_label
        self._binType = 'fit'
        self._fFitname = self._set_full_filenames(['fit-' + variable_to_fit
                                                   ])[0]
        self._params.update({'z_max_pk': z_max_pk})
        self._fit_bounds = bounds
        self._p0 = p0
        list_fit_xvar = ['ln(1+z)', 'lna', 'a', '(1-a)']
        if xvar in list_fit_xvar:
            self._fit_xvar = xvar
        else:
            raise ValueError('xvar must be one of {}'.format(list_fit_xvar))

    def set_bins(self, zbins, abins):
        """
        Set which bins to use and reset to avoid confusion.
        """
        self.reset()
        self._zbins = zbins
        self._abins = abins
        self._binType = 'bins'
        self._fwzname, self._fwaname = self._set_full_filenames(
            ['wz-bins', 'wa-bins'])

    def _read_from_file(self, path):
        """
        Return params for CLASS from the files used in the quintessence (Marsh) runs.
        """
        with open(path) as f:
            f.readline()
            header = f.readline().split()[3:]  # Remove '#', 'w0', and 'wa'

        columns = np.loadtxt(path, unpack=True)[2:]  # Remove columns w0, wa

        for index_h, head in enumerate(header):
            if head[-1] == 'h':
                break

        self.params_smg = list(zip(*columns[:index_h]))
        self.params_2_smg = [
            list(row[~np.isnan(row)])
            for row in np.array(list(zip(*columns[index_h + 2:])))
        ]
        self.h = columns[index_h]
        self.Omega_cdm = columns[index_h + 1]

        self.gravity_model = os.path.basename(path).split('-')[0]

    def _params_from_row(self, row):
        """
        Set parameters.
        """
        params = self._params
        params.update({
            'parameters_smg': str(self.params_smg[row]).strip('[()]'),
            'h': self.h[row],
            'Omega_cdm': self.Omega_cdm[row],
            'gravity_model': self.gravity_model
        })

        if len(self.params_2_smg):
            params.update({
                'parameters_2_smg':
                str(self.params_2_smg[row]).strip('[()]')
            })

        return params

    def compute_bins(self, params):
        """
        Compute the w_i bins for the model with params.
        """
        wzbins = np.empty(len(self._zbins))
        wabins = np.empty(len(self._abins))
        self._params = params
        self._cosmo.set(params)
        try:
            self._cosmo.compute()
            for n, z in enumerate(self._zbins):
                wzbins[n] = self._cosmo.w_smg(z)
            for n, a in enumerate(self._abins):
                wabins[n] = self._cosmo.w_smg(1. / a - 1.)
            shoot = self._cosmo.get_current_derived_parameters(
                ['tuning_parameter'])['tuning_parameter']
        except Exception as e:
            self._cosmo.struct_cleanup()
            self._cosmo.empty()
            raise e

        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return wzbins, wabins, shoot

    def _compute_common_init(self, params):
        """
        Common first steps for compute methods
        """
        self._params.update(params)
        self._cosmo.set(self._params)

        try:
            self._cosmo.compute()
            b = self._cosmo.get_background()
            shoot = self._cosmo.get_current_derived_parameters(
                ['tuning_parameter'])['tuning_parameter']
        except Exception as e:
            self._cosmo.struct_cleanup()
            self._cosmo.empty()
            raise e

        return b, shoot

    def _fit(self, X, Y):
        """
        Fits self._fit_function to X, Y, with self._n_coeffs.
        """
        return wicm.fit(self._fit_function,
                        X,
                        Y,
                        self._n_coeffs,
                        bounds=self._fit_bounds,
                        p0=self._p0)

    def _get_fit_xvar_and_log1Plusz(self, z):
        """
        Return the X array to fit and log(1+z)
        """
        if self._fit_xvar == 'ln(1+z)':
            X = np.log(z + 1)
            lna = X
        elif self._fit_xvar == 'lna':
            X = -np.log(z + 1)
            lna = -X
        elif self._fit_xvar == 'a':
            X = 1. / (1 + z)
            lna = np.log(z + 1)
        elif self._fit_xvar == '(1-a)':
            X = (z / (1 + z))
            lna = np.log(z + 1)

        return X, lna

    def compute_fit_coefficients_for_F(self, params):
        """
        Return the coefficients of the polynomial fit of F(a) = \int w dlna and
        the maximum relative deviations in D_A and f.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact \int dlna a
        ###############################
        z, w = b['z'], b['w_smg']

        Fint = []
        lna = -np.log(1 + z)[::-1]
        for i in lna:
            Fint.append(integrate.trapz(w[::-1][lna >= i], lna[lna >= i]))
        Fint = np.array(Fint)

        #####

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        zTMP = z[z <= zlim]
        Y1 = Fint[::-1][z < zlim]  # Ordered as in CLASS

        #####################

        # Fit to fit_function
        #####################
        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################

        rhoDE_fit = b['(.)rho_smg'][-1] * np.exp(-3 * yfit1) * (
            1 + zTMP)**3  ###### CHANGE WITH CHANGE OF FITTED THING

        Xw_fit, w_fit = wicm.diff(lna, yfit1)
        w_fit = -interp1d(
            Xw_fit, w_fit, bounds_error=False, fill_value='extrapolate')(lna)

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def compute_fit_coefficients_for_logX(self, params):
        """
        Return the coefficients of the fit of log(rho/rho_0) = -3 \int dlna (w+1)
        and the maximum relative deviations in D_A and f.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact -3 \int dlna (w + 1)
        ###############################
        z = b['z']

        logX = np.log(b['(.)rho_smg'] / b['(.)rho_smg'][-1])

        #####

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        Y1 = logX[z <= zlim]

        #####################

        # Fit to fit_function
        #####################
        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################

        rhoDE_fit = b['(.)rho_smg'][-1] * np.exp(
            yfit1)  ###### CHANGE WITH CHANGE OF FITTED THING

        Xw_fit, ThreewPlus1 = wicm.diff(lna, yfit1)
        w_fit = ThreewPlus1 / 3. - 1  # The minus sign is taken into account by the CLASS ordering.
        w_fit = interp1d(Xw_fit,
                         w_fit,
                         bounds_error=False,
                         fill_value='extrapolate')(lna)

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def compute_fit_coefficients_for_X(self, params):
        """
        Return the coefficients of the fit of rho/rho_0 = exp[-3 \int dlna (w+1)]
        and the maximum relative deviations in D_A and f.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact -3 \int dlna (w + 1)
        ###############################
        z = b['z']

        Y = b['(.)rho_smg'] / b['(.)rho_smg'][-1]

        #####

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        Y1 = Y[z <= zlim]

        #####################

        # Fit to fit_function
        #####################
        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################

        rhoDE_fit = b['(.)rho_smg'][
            -1] * yfit1  ###### CHANGE WITH CHANGE OF FITTED THING

        Xw_fit, diff = wicm.diff(lna, yfit1)
        diff = interp1d(Xw_fit,
                        diff,
                        bounds_error=False,
                        fill_value='extrapolate')(lna)
        ThreewPlus1 = diff / yfit1
        w_fit = ThreewPlus1 / 3. - 1  # The minus sign is taken into account by the CLASS ordering.

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def compute_fit_coefficients_for_logRho(self, params):
        """
        Return the coefficients of the fit of ln(rho_de) and the maximum
        relative deviations in D_A and f.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact log(rho)
        ###############################
        z = b['z']

        logX = np.log(b['(.)rho_smg'])

        #####

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        Y1 = logX[z <= zlim]

        #####################

        # Fit to fit_function
        #####################

        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################

        rhoDE_fit = np.exp(yfit1)  ###### CHANGE WITH CHANGE OF FITTED THING

        Xw_fit, ThreewPlus1 = wicm.diff(lna, yfit1 - yfit1[-1])
        w_fit = ThreewPlus1 / 3. - 1  # The minus sign is taken into account by the CLASS ordering.
        w_fit = interp1d(Xw_fit,
                         w_fit,
                         bounds_error=False,
                         fill_value='extrapolate')(lna)

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def compute_fit_coefficients_for_w(self, params):
        """
        Return the coefficients of the fit of w(a) itself and the maximum
        relative deviations in D_A and f.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact \int dlna a
        ###############################
        z, w = b['z'], b['w_smg']

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        zTMP = z[z <= zlim]
        Y1 = w[z <= zlim]

        # Fit to fit_function
        #####################
        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################
        Fint = []
        lna = -np.log(1 + zTMP)[::-1]
        for i in lna:
            Fint.append(integrate.trapz(yfit1[::-1][lna >= i], lna[lna >= i]))
        Fint = np.array(Fint[::-1])

        rhoDE_fit = b['(.)rho_smg'][-1] * np.exp(
            -3 * Fint) * (1 + zTMP)**3  # CHANGE WITH CHANGE OF FITTED THING

        # TODO: needed?
        # Xw_fit, w_fit = X, yfit1
        # w_fit = interp1d(Xw_fit, w_fit, bounds_error=False, fill_value='extrapolate')(X)
        w_fit = yfit1

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def _compute_maximum_relative_error_DA_f(self, rhoDE_fit, w_fit):
        """
        Return the maximum relative error for the angular diameter distance and
        the growth rate, f.

        rhoDE_fit: array of the fitted dark-energy density
        w_fit: array of the fitted equation of state, sampled on the same grid
        """

        b = self._cosmo.get_background()

        # Compute the exact growth rate
        #####################
        z_max_pk = self._params['z_max_pk']
        zlim = z_max_pk
        z, w = b['z'], b['w_smg']
        zTMP = z[z <= zlim]
        rhoM = (b['(.)rho_b'] + b['(.)rho_cdm'])
        rhoR = (b['(.)rho_g'] + b['(.)rho_ur'])
        DA = b['ang.diam.dist.']

        OmegaDEwF_exact = interp1d(z[z <= z_max_pk],
                                   (b['(.)rho_smg'] / b['(.)rho_crit'] *
                                    w)[z <= z_max_pk])
        OmegaMF = interp1d(z[z <= z_max_pk],
                           (rhoM / b['(.)rho_crit'])[z <= z_max_pk])

        time_boundaries = [z[z <= z_max_pk][0], z[z <= z_max_pk][-1]]

        # Use LSODA integrator as some solutions were wrong with RK45 and OK
        # with this.
        f = integrate.solve_ivp(
            cosmo_extra.fprime(OmegaDEwF_exact, OmegaMF),
            time_boundaries,
            [cosmo_extra.growthrate_at_z(self._cosmo, z_max_pk)],
            method='LSODA',
            dense_output=True)

        # Compute D_A for fitted model
        ################
        H_fit = np.sqrt(rhoM[z <= zlim] + rhoR[z <= zlim] + rhoDE_fit)

        DA_fit = cosmo_extra.angular_distance(z[z <= zlim],
                                              H_fit[zTMP <= zlim])

        # Compute the growth rate for fitted model
        ###############

        OmegaMF_fit = interp1d(
            zTMP, 1 - rhoDE_fit / H_fit**2 - rhoR[z <= zlim] /
            H_fit**2)  ####### THIS FITS OBSERVABLES CORRECTLY
        # OmegaMF_fit = interp1d(zTMP, rhoM[z<=zlim]/H_fit**2)      ####### THIS FITS OBSERVABLES CORRECTLY
        OmegaDEwF_fit = interp1d(zTMP, rhoDE_fit / H_fit**2 * w_fit)

        f_fit = integrate.solve_ivp(
            cosmo_extra.fprime(OmegaDEwF_fit,
                               OmegaMF_fit), [zTMP[0], zTMP[-1]],
            [cosmo_extra.growthrate_at_z(self._cosmo, zTMP[0])],
            method='LSODA',
            dense_output=True)

        # Obtain rel. deviations.
        ################

        # Remove close to 0 points as rel.dev diverges. z = 0.05 is the lowest
        # redshift observed and is done in BOSS survey. arXiv: 1308.4164
        # DA_reldev = max(np.abs(DA_fit[zTMP>=0.04]/DA[ (z>=0.04) & (z<=zlim)] - 1))
        DA_reldev = max(np.abs(DA_fit / DA[z <= zlim] - 1))
        f_reldev = max(np.abs(f_fit.sol(zTMP)[0] / f.sol(zTMP)[0] - 1))

        return DA_reldev, f_reldev

    def compute_Pade_coefficients(self, params):
        """
        Returns the Pade coefficients for w computed from params and the maximum
        and minimum residual in absolute value.
        """
        self._params = params
        self._cosmo.set(params)

        try:
            self._cosmo.compute()
            b = self._cosmo.get_background()
            shoot = self._cosmo.get_current_derived_parameters(
                ['tuning_parameter'])['tuning_parameter']
        except Exception as e:
            self._cosmo.struct_cleanup()
            self._cosmo.empty()
            raise e

        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        xDict = {
            'z': b['z'],
            'z+1': b['z'] + 1,
            'a': 1. / (b['z'] + 1),
            'log(a)': -np.log(b['z'] + 1),
            'log(z+1)': np.log(b['z'] + 1)
        }

        X = xDict[self._Pade_xvar]
        w = b['w_smg']

        if self._Pade_xReverse:
            X = X[::-1]
            w = w[::-1]

        PadeOrder = np.array(self._PadeOrder)

        if not self._Pade_increase:
            reduceOrder = [[0, 0], [1, 0], [0, 1], [2, 0], [2, 1], [3, 1]]
            orderList = PadeOrder - reduceOrder

        else:
            orderList = [[1, 1], [2, 0], [3, 0], [2, 1], [2, 2], [3, 1],
                         [4, 0], [2, 3], [3, 2], [4, 1], [5, 0], [3, 3],
                         [4, 2], [5, 1], [3, 4], [4, 3], [5, 2], [3, 5],
                         [4, 4], [5, 3], [4, 5], [5, 4], [5, 5]]

        r = np.array([np.inf])
        for order in orderList:
            # Increase order of Pade up to [5/5].
            try:
                padeCoefficientsTMP, padeFitTMP = fit_pade(
                    X, w, *order, maxfev=self._Pade_maxfev)
                rTMP = np.abs(padeFitTMP / w - 1.)
                if self._Pade_increase and (np.max(rTMP) >
                                            self._Pade_accuracy):
                    if np.max(rTMP) < np.max(r):
                        padeCoefficients = padeCoefficientsTMP
                        r = rTMP
                    continue
                else:
                    padeCoefficients = padeCoefficientsTMP
                    r = rTMP
                    break
            except Exception as e:
                if (order == orderList[-1]) and (len(r) == 1):
                    raise e

                continue

        zeros = (PadeOrder - order)

        numCoefficients = np.append(padeCoefficients[:order[0] + 1],
                                    [0.] * zeros[0])
        denCoefficients = np.append(padeCoefficients[order[0] + 1:],
                                    [0.] * zeros[1])
        padeCoefficients = np.concatenate([numCoefficients, denCoefficients])

        return np.concatenate([padeCoefficients, [np.min(r),
                                                  np.max(r)]]), shoot

    def compute_bins_from_params(self, params_func, number_of_rows):
        """
        Compute the w_i bins for the models given by the function
        params_func, called number_of_rows times.
        """
        self._create_output_files()

        wzbins = []
        wabins = []
        params = []
        shoot = []

        for row in range(number_of_rows):
            sys.stdout.write("{}/{}\n".format(row + 1, number_of_rows))
            params_tmp = params_func().copy()

            try:
                wzbins_tmp, wabins_tmp, shoot_tmp = self.compute_bins(
                    params_tmp)
                wzbins.append(wzbins_tmp)
                wabins.append(wabins_tmp)
                params.append(params_tmp)
                shoot.append(shoot_tmp)
                # Easily generalizable. It could be inputted a list with the
                # desired derived parameters and store the whole dictionary.
            except Exception as e:
                sys.stderr.write(str(self._params) + '\n')
                sys.stderr.write(str(e))
                sys.stderr.write('\n')
                continue

            if len(wzbins) == 5:
                self._save_computed(params, shoot, [wzbins, wabins])

                params = []
                wzbins = []
                wabins = []
                shoot = []

        self._save_computed(params, shoot, [wzbins, wabins])

    def compute_Pade_from_params(self, params_func, number_of_rows):
        """
        Compute the Pade coefficients for the models given by the function
        params_func, called number_of_rows times.
        """
        self._create_output_files()

        wbins = []
        params = []
        shoot = []

        for row in range(number_of_rows):
            sys.stdout.write("{}/{}\n".format(row + 1, number_of_rows))
            params_tmp = params_func().copy()

            try:
                wbins_tmp, shoot_tmp = self.compute_Pade_coefficients(
                    params_tmp)
                wbins.append(wbins_tmp)
                params.append(params_tmp)
                shoot.append(shoot_tmp)
                # Easily generalizable. It could be inputted a list with the
                # desired derived parameters and store the whole dictionary.
            except Exception as e:
                sys.stderr.write(str(self._params) + '\n')
                sys.stderr.write(str(e))
                sys.stderr.write('\n')
                continue

            if len(wbins) == 5:
                self._save_computed(params, shoot, wbins)

                params = []
                wbins = []
                shoot = []

        self._save_computed(params, shoot, wbins)

    def compute_fit_from_params(self, params_func, number_of_rows):
        """
        Compute the fit for the models given by the function
        params_func, called number_of_rows times.

        The variable to fit is chosen in self.set_fit
        """
        # TODO: If this grows, consider creating a separate method
        if self._variable_to_fit == 'F':
            fit_variable_function = self.compute_fit_coefficients_for_F
        elif self._variable_to_fit == 'w':
            fit_variable_function = self.compute_fit_coefficients_for_w
        elif self._variable_to_fit == 'logRho':
            fit_variable_function = self.compute_fit_coefficients_for_logRho
        elif self._variable_to_fit == 'logX':
            fit_variable_function = self.compute_fit_coefficients_for_logX
        elif self._variable_to_fit == 'X':
            fit_variable_function = self.compute_fit_coefficients_for_X

        self._create_output_files()

        coeffs = []
        params = []
        shoot = []

        for row in range(number_of_rows):
            sys.stdout.write("{}/{}\n".format(row + 1, number_of_rows))
            # params_tmp = params_func().copy()

            try:
                coeffs_tmp, shoot_tmp = fit_variable_function(params_func())
                coeffs.append(coeffs_tmp)
                params.append(self._params.copy())
                shoot.append(shoot_tmp)
                # Easily generalizable. It could be inputted a list with the
                # desired derived parameters and store the whole dictionary.
            except Exception as e:
                sys.stderr.write(str(self._params) + '\n')
                sys.stderr.write(str(e))
                sys.stderr.write('\n')
                continue

            if len(coeffs) == 5:
                self._save_computed(params, shoot, coeffs)

                params = []
                coeffs = []
                shoot = []

        self._save_computed(params, shoot, coeffs)

    def compute_bins_from_file(self, path):
        """
        Compute the w_i bins for the models given in path.
        """
        if self._computed is True:
            print(
                "Bins already computed. Use reset if you want to compute it again"
            )
            return

        self._path = path

        self._read_from_file(path)

        def params_gen(length):
            row = 0
            while row < length:
                yield self._params_from_row(row)
                row += 1

        params = params_gen(len(self.params_smg))

        self.compute_bins_from_params(params.__next__, len(self.params_smg))

    def _create_output_files(self):
        """
        Initialize the output files.
        """
        # TODO: Add check if files exist
        with open(self._fparamsname, 'a') as f:
            f.write('# ' + "Dictionary of params to use with cosmo.set()" +
                    '\n')

        with open(self._fshootname, 'a') as f:
            f.write('# ' + "Shooting variable value" + '\n')

        if self._binType == 'bins':
            with open(self._fwzname, 'a') as f:
                f.write('# ' + "Bins on redshift" + '\n')
                f.write('# ' + str(self._zbins).strip('[]').replace('\n', '') +
                        '\n')

            with open(self._fwaname, 'a') as f:
                f.write('# ' + "Bins on scale factor" + '\n')
                f.write('# ' + str(self._abins).strip('[]').replace('\n', '') +
                        '\n')
        elif self._binType == 'Pade':
            with open(self._fPadename, 'a') as f:
                f.write('# ' + "Pade fit for temporal variable {} \n".format(
                    self._Pade_xvar))
                coeff_header_num = [
                    'num_{}'.format(n) for n in range(self._PadeOrder[0] + 1)
                ]
                coeff_header_den = [
                    'den_{}'.format(n + 1) for n in range(self._PadeOrder[1])
                ]
                res_header = ['min(residual)', 'max(residual)']
                f.write('# ' + ' '.join(coeff_header_num + coeff_header_den +
                                        res_header) + '\n')
        elif self._binType == 'fit':
            with open(self._fFitname, 'a') as f:
                f.write('# ' +
                        "{} fit for temporal variable {} of {}\n".format(
                            self._fit_function_label, self._fit_xvar,
                            self._variable_to_fit))
                coeff_header_num = [
                    'c_{}'.format(n) for n in range(self._n_coeffs)
                ]
                res_header = ['max(rel.dev. D_A)', 'max(rel.dev. f)']
                f.write('# ' + ' '.join(coeff_header_num + res_header) + '\n')

    def _save_computed(self, params, shoot, wbins):
        """
        Save stored iterations in file.
        """
        with open(self._fparamsname, 'a') as f:
            for i in params:
                f.write(str(i) + '\n')

        with open(self._fshootname, 'a') as f:
            np.savetxt(f, shoot)

        if self._binType == 'bins':
            wzbins, wabins = wbins
            with open(self._fwzname, 'a') as f:
                np.savetxt(f, wzbins)

            with open(self._fwaname, 'a') as f:
                np.savetxt(f, wabins)
        elif self._binType == 'Pade':
            with open(self._fPadename, 'a') as f:
                np.savetxt(f, wbins)
        elif self._binType == 'fit':
            with open(self._fFitname, 'a') as f:
                np.savetxt(f, wbins)

    def reset(self):
        """
        Reset class
        """
        self._cosmo.struct_cleanup()
        self._cosmo.empty()
        self._set_default_values()
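
A hedged sketch of how the Binning class above could be configured. The file prefix, the fit function and its calling convention are assumptions (the exact callable form expected by wicm.fit is not shown in this snippet); no CLASS run is triggered until one of the compute_* methods is called:

def poly2(X, c0, c1, c2):
    # assumed curve_fit-style signature: f(x, *coefficients)
    return c0 + c1 * X + c2 * X**2

binning = Binning('expansion', outdir='./output/')

# Fit w(z) with a quadratic in ln(1+z), storing 3 coefficients per model:
binning.set_fit(poly2, n_coeffs=3, variable_to_fit='w',
                fit_function_label='c0 + c1*x + c2*x^2', xvar='ln(1+z)')

# Alternatively, bin w directly on grids of z and a:
# binning.set_bins(zbins=np.linspace(0., 3., 10),
#                  abins=np.linspace(0.25, 1., 10))

# binning.compute_fit_from_params(params_func, number_of_rows=100)
# where params_func() returns a (hi_)CLASS parameter dictionary per model.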
Example #40
def calculate_derivative_param(params, param_to_test, param_fiducial,
                               percentage_difference, param_list, der_param,
                               write_file, i):
    cosmo = Class()  # Create an instance of the CLASS wrapper
    param = np.zeros((N, 3, len(param_list)))
    for j in range(3):
        # going over a_c and Om_fld values
        # if j==0:
        # 	params['Omega_fld_ac'] = Omega_ac_fid[i]
        # if j==1:
        # 	params['Omega_fld_ac'] = Omega_ac_fid[i]+percentage_difference*Omega_ac_fid[i]
        # if j==2:
        # 	params['Omega_fld_ac'] = Omega_ac_fid[i]-percentage_difference*Omega_ac_fid[i]
        if param_to_test == 'scf_parameters':
            if j == 0:
                params['scf_parameters'] = '%.5f,0.0' % (param_fiducial)
            if j == 1:
                params['scf_parameters'] = '%.5f,0.0' % (
                    param_fiducial + percentage_difference * param_fiducial)
            if j == 2:
                params['scf_parameters'] = '%.5f,0.0' % (
                    param_fiducial - percentage_difference * param_fiducial)
        else:
            if j == 0:
                params[param_to_test] = param_fiducial
            if j == 1:
                params[
                    param_to_test] = param_fiducial + percentage_difference * param_fiducial
            if j == 2:
                params[
                    param_to_test] = param_fiducial - percentage_difference * param_fiducial
        # if j==0:
        # 	params['Omega_many_fld'] = Omega0_fld
        # if j==1:
        # 	params['Omega_many_fld'] = Omega0_fld+percentage_difference*Omega0_fld
        # if j==2:
        # 	params['Omega_many_fld'] = Omega0_fld-percentage_difference*Omega0_fld
        print(params[param_to_test], param_fiducial)
        # try to solve with a certain cosmology, no worries if it cannot
        # l_theta_s = np.pi/(theta_s)
        # print "here:",l_theta_s
        # try:
        cosmo.set(params)  # Set the parameters to the cosmological code
        cosmo.compute()  # solve physics
        cl = cosmo.lensed_cl(2500)
        ell = cl['ell'][2:]
        tt = cl['tt'][2:]
        fTT = interp1d(ell, tt * ell * (ell + 1))

        for k in range(len(param_list)):
            print('k', k, len(param_list))
            #calculate height peak difference
            if (param_list[k] == 'HP'):
                # param[i][j][k] = max(tt)-fTT(10)
                param[i][j][k] = max(tt * ell * (ell + 1))
            elif (param_list[k] == 'rs_rec'):
                param[i][j][k] = cosmo.rs_rec()
            elif (param_list[k] == 'rd_rec'):
                param[i][j][k] = cosmo.rd_rec()
            elif (param_list[k] == 'rs_rec_over_rd_rec'):
                # print cosmo.rs_rec()/cosmo.rd_rec()
                param[i][j][k] = cosmo.rs_rec() / cosmo.rd_rec()
            elif (param_list[k] == 'da_rec'):
                param[i][j][k] = cosmo.da_rec()
            elif (param_list[k] == 'theta_s'):
                param[i][j][k] = cosmo.theta_s()
            elif (param_list[k] == 'H0'):
                param[i][j][k] = cosmo.Hubble(0)

        print(max(tt * ell * (ell + 1)), cosmo.theta_s(), cosmo.da_rec(),
              cosmo.rs_rec(), cosmo.z_rec())
        # for l in range(len(tt)):
        # 	# print l, tt[l], max(tt*ell*(ell+1))
        # 	if tt[l]*ell[l]*(ell[l]+1) == max(tt*ell*(ell+1)):
        # 		print "l:", l,max(tt*ell*(ell+1))

        # except CosmoComputationError: # this happens when CLASS fails
        # 	print CosmoComputationError
        # 	pass # eh, don't do anything

        #calculate derivative
        # der_HP[i]= (HP[i][1]-HP[i][2])/(Omega_ac_fid[i]+percentage_difference*Omega_ac_fid[i]-(Omega_ac_fid[i]-percentage_difference*Omega_ac_fid[i]))*Omega_ac_fid[0]/HP[i][0]

        cosmo.empty()
        cosmo.struct_cleanup()
    if write_file == True:
        f.write(str(ac_values[i]) + '\t\t')
    print "calculating derivative"
    for k in range(len(param_list)):
        # der_HP[i]= (HP[i][1]-HP[i][2])/(fraction_fiducial+percentage_difference*fraction_fiducial-(fraction_fiducial-percentage_difference*fraction_fiducial))*fraction_fiducial/HP[i][0]
        der_param[i][k] = (param[i][1][k] - param[i][2][k]) / (
            param_fiducial + percentage_difference * param_fiducial -
            (param_fiducial - percentage_difference * param_fiducial)
        ) * param_fiducial / param[i][0][k]
        if write_file == True:
            f.write(str(der_param[i][k]) + '\t\t')  # info on format
        print(param_list[k], der_param[i][k])
    if write_file == True:
        f.write('\n')
    return
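
The centred difference at the end of the loop above is a normalised (logarithmic) derivative: dlnO/dlnp ~ [O(p + dp) - O(p - dp)] / (2*dp) * p / O(p), with dp = percentage_difference * p. A standalone sketch of the same formula:

def log_derivative(observable, p_fid, eps=0.01):
    """Centred estimate of dln(observable)/dln(p) at p_fid."""
    dp = eps * p_fid
    return ((observable(p_fid + dp) - observable(p_fid - dp)) / (2. * dp)
            * p_fid / observable(p_fid))

# e.g. for O(p) = p**2 the logarithmic derivative is 2:
assert abs(log_derivative(lambda p: p**2, 3.0) - 2.0) < 1e-6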
Example #41
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk,
    etc.)

    """
    @classmethod
    def setUpClass(cls):
        cls.faulty_figs_path = os.path.join(
            os.path.sep.join(
                os.path.realpath(__file__).split(os.path.sep)[:-1]),
            'faulty_figs')

        if os.path.isdir(cls.faulty_figs_path):
            shutil.rmtree(cls.faulty_figs_path)

        os.mkdir(cls.faulty_figs_path)

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()
        self.cosmo_newt = Class()

        if CLASS_VERBOSE:
            self.verbose = {
                'input_verbose': 1,
                'background_verbose': 1,
                'thermodynamics_verbose': 1,
                'perturbations_verbose': 1,
                'transfer_verbose': 1,
                'primordial_verbose': 1,
                'harmonic_verbose': 1,
                'fourier_verbose': 1,
                'lensing_verbose': 1,
                'distortions_verbose': 1,
                'output_verbose': 1,
            }
        else:
            self.verbose = {}
        self.scenario = {}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        self.cosmo = 0
        self.cosmo_newt.struct_cleanup()
        self.cosmo_newt.empty()
        self.cosmo_newt = 0
        del self.scenario

    def poormansname(self, somedict):
        string = "_".join(
            [k + '=' + str(v) for k, v in list(somedict.items())])
        string = string.replace('/', '%')
        string = string.replace(',', '')
        string = string.replace(' ', '')
        return string

    @parameterized.expand(TUPLE_ARRAY,
                          doc_func=custom_name_func,
                          custom_name_func=custom_name_func)
    @attr('dump_ini_files')
    def test_Valgrind(self, inputdict):
        """Dump files"""
        self.scenario.update(inputdict)
        self.name = self._testMethodName
        if self.has_incompatible_input():
            return
        path = os.path.join(self.faulty_figs_path, self.name)
        self.store_ini_file(path)
        self.scenario.update({'gauge': 'Newtonian'})
        self.store_ini_file(path + 'N')

    @parameterized.expand(TUPLE_ARRAY,
                          doc_func=custom_name_func,
                          custom_name_func=custom_name_func)
    @attr('test_scenario')
    def test_scenario(self, inputdict):
        """Test scenario"""
        self.scenario.update(inputdict)
        self.name = self._testMethodName
        self.cosmo.set(
            dict(itertools.chain(self.verbose.items(), self.scenario.items())))

        cl_dict = {
            'tCl': ['tt'],
            'lCl': ['pp'],
            'pCl': ['ee', 'bb'],
            'nCl': ['dd'],
            'sCl': ['ll'],
        }

        # 'lensing' is always set to yes. Therefore, trying to compute 'tCl' or
        # 'pCl' will fail except if we also ask for 'lCl'.
        if self.has_incompatible_input():
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return
        else:
            self.cosmo.compute()

        self.assertTrue(self.cosmo.state,
                        "Class failed to go through all __init__ methods")
        # Depending
        if 'output' in self.scenario.keys():
            # Positive tests of raw cls
            output = self.scenario['output']
            for elem in output.split():
                if elem in cl_dict.keys():
                    for cl_type in cl_dict[elem]:
                        is_density_cl = (elem == 'nCl' or elem == 'sCl')
                        if is_density_cl:
                            cl = self.cosmo.density_cl(100)
                        else:
                            cl = self.cosmo.raw_cl(100)
                        self.assertIsNotNone(cl, "raw_cl returned nothing")
                        cl_length = np.shape(
                            cl[cl_type][0])[0] if is_density_cl else np.shape(
                                cl[cl_type])[0]
                        self.assertEqual(cl_length, 101,
                                         "raw_cl returned wrong size")
                if elem == 'mPk':
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any(
                [elem in list(cl_dict.keys()) for elem in output.split()]):
                # testing absence of any Cl
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if 'mPk' not in output.split():
                # testing absence of mPk
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

        if COMPARE_OUTPUT_REF or COMPARE_OUTPUT_GAUGE:
            # Now compute same scenario in Newtonian gauge
            self.cosmo_newt.set(
                dict(list(self.verbose.items()) + list(self.scenario.items())))
            self.cosmo_newt.set({'gauge': 'newtonian'})
            self.cosmo_newt.compute()

        if COMPARE_OUTPUT_GAUGE:
            # Compare synchronous and Newtonian gauge
            self.assertTrue(
                self.cosmo_newt.state,
                "Class failed to go through all __init__ methods in Newtonian gauge"
            )

            self.compare_output(self.cosmo, "Synchronous", self.cosmo_newt,
                                'Newtonian', COMPARE_CL_RELATIVE_ERROR_GAUGE,
                                COMPARE_PK_RELATIVE_ERROR_GAUGE)

        if COMPARE_OUTPUT_REF:
            # Compute reference models in both gauges and compare
            cosmo_ref = classyref.Class()
            cosmo_ref.set(self.cosmo.pars)
            cosmo_ref.compute()
            status = self.compare_output(cosmo_ref, "Reference", self.cosmo,
                                         'Synchronous',
                                         COMPARE_CL_RELATIVE_ERROR,
                                         COMPARE_PK_RELATIVE_ERROR)
            assert status, 'Reference comparison failed in Synchronous gauge!'

            cosmo_ref = classyref.Class()
            cosmo_ref.set(self.cosmo_newt.pars)
            cosmo_ref.compute()
            status = self.compare_output(cosmo_ref, "Reference",
                                         self.cosmo_newt, 'Newtonian',
                                         COMPARE_CL_RELATIVE_ERROR,
                                         COMPARE_PK_RELATIVE_ERROR)
            assert status, 'Reference comparison failed in Newtonian gauge!'

    def has_incompatible_input(self):

        should_fail = False

        # If we have tensor modes, we must have one tensor observable,
        # either tCl or pCl.
        if has_tensor(self.scenario):
            if 'output' not in list(self.scenario.keys()):
                should_fail = True
            else:
                output = self.scenario['output'].split()
                if 'tCl' not in output and 'pCl' not in output:
                    should_fail = True

        # If we have specified lensing, we must have lCl in output,
        # otherwise lensing will not be read (which is an error).
        if 'lensing' in list(self.scenario.keys()):
            if 'output' not in list(self.scenario.keys()):
                should_fail = True
            else:
                output = self.scenario['output'].split()
                if 'lCl' not in output:
                    should_fail = True
                elif 'tCl' not in output and 'pCl' not in output:
                    should_fail = True

        # If we have specified a tensor method, we must have tensors.
        if 'tensor method' in list(self.scenario.keys()):
            if not has_tensor(self.scenario):
                should_fail = True

        # If we have specified non linear, we must have some form of
        # perturbations output.
        if 'non linear' in list(self.scenario.keys()):
            if 'output' not in list(self.scenario.keys()):
                should_fail = True

        # If we ask for Cl's of lensing potential, number counts or cosmic shear, we must have scalar modes.
        # The same applies to density and velocity transfer functions and the matter power spectrum:
        if 'output' in self.scenario and 'modes' in self.scenario and self.scenario[
                'modes'].find('s') == -1:
            requested_output_types = set(self.scenario['output'].split())
            for scalar_output_type in [
                    'lCl', 'nCl', 'dCl', 'sCl', 'mPk', 'dTk', 'mTk', 'vTk'
            ]:
                if scalar_output_type in requested_output_types:
                    should_fail = True
                    break

        # If we specify initial conditions (for scalar modes), we must have
        # perturbations and scalar modes.
        if 'ic' in list(self.scenario.keys()):
            if 'modes' in list(self.scenario.keys()
                               ) and self.scenario['modes'].find('s') == -1:
                should_fail = True
            if 'output' not in list(self.scenario.keys()):
                should_fail = True

        # If we use inflation module, we must have scalar modes,
        # tensor modes, no vector modes and we should only have adiabatic IC:
        if 'P_k_ini type' in list(self.scenario.keys(
        )) and self.scenario['P_k_ini type'].find('inflation') != -1:
            if 'modes' not in list(self.scenario.keys()):
                should_fail = True
            else:
                if self.scenario['modes'].find('s') == -1:
                    should_fail = True
                if self.scenario['modes'].find('v') != -1:
                    should_fail = True
                if self.scenario['modes'].find('t') == -1:
                    should_fail = True
            if 'ic' in list(self.scenario.keys()
                            ) and self.scenario['ic'].find('i') != -1:
                should_fail = True

        return should_fail
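
    # Illustrative (hypothetical) scenarios that the checks above would flag:
    #   {'modes': 's,t', 'output': 'mPk'}        -> tensors without tCl or pCl
    #   {'lensing': 'yes', 'output': 'tCl pCl'}  -> lensing requested without lCl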

    def compare_output(self, reference, reference_name, candidate,
                       candidate_name, rtol_cl, rtol_pk):
        status_pass = True
        for elem in ['raw_cl', 'lensed_cl']:
            # Try to get the elem, but if it was not computed, a
            # CosmoSevereError should be raised. In this case, ignore the
            # whole block.
            try:
                to_test = getattr(candidate, elem)()
            except CosmoSevereError:
                continue
            ref = getattr(reference, elem)()
            for key, value in list(ref.items()):
                if key != 'ell':
                    # For all self spectra, try to compare allclose
                    if key[0] == key[1]:
                        # If it is a 'dd' or 'll', it is a dictionary.
                        if isinstance(value, dict):
                            for subkey in list(value.keys()):
                                try:
                                    np.testing.assert_allclose(
                                        value[subkey],
                                        to_test[key][subkey],
                                        rtol=rtol_cl,
                                        atol=COMPARE_CL_ABSOLUTE_ERROR)
                                except (AssertionError, TypeError):
                                    self.cl_faulty_plot(
                                        elem + "_" + key, value[subkey][2:],
                                        reference_name,
                                        to_test[key][subkey][2:],
                                        candidate_name, rtol_cl)
                        else:
                            try:
                                np.testing.assert_allclose(
                                    value,
                                    to_test[key],
                                    rtol=rtol_cl,
                                    atol=COMPARE_CL_ABSOLUTE_ERROR)
                            except (AssertionError, TypeError) as e:
                                self.cl_faulty_plot(elem + "_" + key,
                                                    value[2:], reference_name,
                                                    to_test[key][2:],
                                                    candidate_name, rtol_cl)
                                status_pass = False
                    # For cross-spectra, as there can be zero-crossing, we
                    # instead compare the difference.
                    else:
                        # First, we multiply each array by the biggest value
                        norm = max(
                            np.abs(value).max(),
                            np.abs(to_test[key]).max())
                        value *= norm
                        to_test[key] *= norm
                        try:
                            np.testing.assert_array_almost_equal(value,
                                                                 to_test[key],
                                                                 decimal=3)
                        except AssertionError:
                            self.cl_faulty_plot(elem + "_" + key, value[2:],
                                                reference_name,
                                                to_test[key][2:],
                                                candidate_name, rtol_cl)
                            status_pass = False

        if 'output' in list(self.scenario.keys()):
            if self.scenario['output'].find('mPk') != -1:
                # testing equality of Pk
                k = np.logspace(-2, log10(self.scenario['P_k_max_1/Mpc']), 50)
                reference_pk = np.array([reference.pk(elem, 0) for elem in k])
                candidate_pk = np.array([candidate.pk(elem, 0) for elem in k])
                try:
                    np.testing.assert_allclose(reference_pk,
                                               candidate_pk,
                                               rtol=rtol_pk,
                                               atol=COMPARE_PK_ABSOLUTE_ERROR)
                except AssertionError:
                    self.pk_faulty_plot(k, reference_pk, reference_name,
                                        candidate_pk, candidate_name, rtol_pk)
                    status_pass = False

        return status_pass

    def store_ini_file(self, path):
        parameters = dict(
            list(self.verbose.items()) + list(self.scenario.items()))
        with open(path + '.ini', 'w') as param_file:
            param_file.write('# ' + str(parameters) + '\n')
            if len(parameters) == 0:
                # CLASS complains if the .ini file does not do anything.
                param_file.write('write warnings = yes\n')
            for key, value in list(parameters.items()):
                param_file.write(key + " = " + str(value) + '\n')
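
    # The stored .ini file is plain "key = value" text that CLASS can read
    # back; e.g. a hypothetical scenario {'output': 'tCl'} would be written as:
    #   # {'output': 'tCl'}
    #   output = tCl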

    def cl_faulty_plot(self, cl_type, reference, reference_name, candidate,
                       candidate_name, rtol):
        path = os.path.join(self.faulty_figs_path, self.name)
        fig, axes = plt.subplots(2, 1, sharex=True)
        ell = np.arange(max(np.shape(candidate))) + 2
        factor = ell * (ell + 1) / (2 *
                                    np.pi) if cl_type[-2:] != 'pp' else ell**5
        axes[0].plot(ell, factor * reference, label=reference_name)
        axes[0].plot(ell, factor * candidate, label=candidate_name)
        axes[1].semilogy(ell,
                         100 * abs(candidate / reference - 1),
                         label=cl_type)
        axes[1].axhline(y=100 * rtol, color='k', ls='--')

        axes[-1].set_xlabel(r'$\ell$')
        if cl_type[-2:] == 'pp':
            axes[0].set_ylabel(r'$\ell^5 C_\ell^\mathrm{{{_cl_type}}}$'.format(
                _cl_type=cl_type[-2:].upper()))
        else:
            axes[0].set_ylabel(
                r'$\ell(\ell + 1)/(2\pi)C_\ell^\mathrm{{{_cl_type}}}$'.format(
                    _cl_type=cl_type[-2:].upper()))
        axes[1].set_ylabel('Relative error [%]')

        for ax in axes:
            ax.legend(loc='upper right')

        fig.tight_layout()
        fname = '{}_{}_{}_vs_{}.pdf'.format(path, cl_type, reference_name,
                                            candidate_name)
        fig.savefig(fname, bbox_inches='tight')
        plt.close(fig)

        # Store parameters (contained in self.scenario) to text file
        self.store_ini_file(path)

    def pk_faulty_plot(self, k, reference, reference_name, candidate,
                       candidate_name, rtol):
        path = os.path.join(self.faulty_figs_path, self.name)

        fig, axes = plt.subplots(2, 1, sharex=True)
        axes[0].loglog(k, k**1.5 * reference, label=reference_name)
        axes[0].loglog(k, k**1.5 * candidate, label=candidate_name)
        axes[0].legend(loc='upper right')

        axes[1].loglog(k, 100 * np.abs(candidate / reference - 1))
        axes[1].axhline(y=100 * rtol, color='k', ls='--')

        axes[-1].set_xlabel(r'$k\quad [\mathrm{Mpc}^{-1}]$')
        axes[0].set_ylabel(r'$k^\frac{3}{2}P(k)$')
        axes[1].set_ylabel(r'Relative error [%]')

        fig.tight_layout()
        fname = path + '_pk_{}_vs_{}.pdf'.format(reference_name,
                                                 candidate_name)
        fig.savefig(fname, bbox_inches='tight')
        plt.close(fig)

        # Store parameters (contained in self.scenario) to text file
        self.store_ini_file(path)
Example #42
0
class Theory(metaclass=ABCMeta):

    def __init__(self):
        self.data = []
        self.data_compu_error = []
        self.data_guess_error = []
        self.name = self.__class__.__name__
        self.common_params_names = ["w0", "wa", "h", "Omega_cdm"]
        # The distribution limits are taken from Marsh et al.
        self.params_dists = {
            "h": [[0.6, 0.8], "U"],
            "Omega_cdm": [[0.15, 0.35], "U"]
        }
        self.params = {'Omega_Lambda': 0, 'Omega_fld': 0, 'Omega_smg': -1}
        # 'input_verbose': 10}
        self.parameters_smg = []
        self.parameters_2_smg = []
        self.parameters_2_smg_short = []
        self.cosmo = Class()

    def __header(self, kind):
        if kind == "noerror":
            params_names = self.common_params_names + self.model_params_names
        else:
            params_names = self.common_params_names[
                2:] + self.model_params_names

        header = ""
        for number, name in enumerate(params_names):
            header += "{}:{} ".format(number, name)

        return header.strip() + "\n"  # strip trailing whitespace

    def header_noerror(self):
        self.header_noerror = self.__header("noerror")

    def header_error(self):
        self.header_error = self.__header("error")

    def header_intervals(self):
        header = ""
        for param, dist_list in self.params_dists.items():
            if "log" in dist_list[1]:
                header += r"log_10({}) \in {}{} ".format(
                    param, dist_list[1][3:], dist_list[0])
            else:
                header += r"{} \in {}{} ".format(param, dist_list[1],
                                                 dist_list[0])

        self.header_intervals = header + "\n"

    def update_headers(self):
        self.header_noerror()
        self.header_error()
        self.header_intervals()

    def update_model_params_names(self, model_params_names):
        self.model_params_names = model_params_names

    def compute_model(self, parameters, debug=False):

        self.parameters_smg = parameters[:len(self.parameters_smg)]
        self.params['parameters_smg'] = str(self.parameters_smg).strip('[]')
        if self.parameters_2_smg:
            self.parameters_2_smg = parameters[len(self.parameters_smg):-2]
            self.params['parameters_2_smg'] = str(
                self.parameters_2_smg_short).strip('[]')
        self.params['h'] = parameters[-2]
        self.params['Omega_cdm'] = parameters[-1]

        if debug:
            del self.params['Omega_smg']
            self.params['Omega_smg_debug'] = -1

        self.cosmo.set(self.params)

        if debug:
            self.params['Omega_smg'] = -1

        self.cosmo.compute()

    def model_clean(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()

    def compute_data(self, points):
        while len(self.data) < points:
            print "######## Point: {} of {}".format(len(self.data) + 1, points)

            h, Omega_cdm = self.compute_cosmological_params()
            # TODO: Improve management of cosmological parameters.

            self.compute_parameters()
            self.params['parameters_smg'] = str(
                self.parameters_smg).strip('[]')
            if self.parameters_2_smg:
                self.params['parameters_2_smg'] = str(
                    self.parameters_2_smg_short).strip('[]')
            self.params['h'] = h
            self.params['Omega_cdm'] = Omega_cdm
            print([h, Omega_cdm] + self.parameters_smg)
            print(self.parameters_2_smg)

            self.cosmo.set(self.params)
            try:
                self.cosmo.compute()

            except CosmoSevereError as e:
                print("CosmoSevere!!!")
                print(e)
                break

            except CosmoComputationError as e:
                print("CosmoCompu!!!")
                print(e)
                self.data_compu_error.append(self.parameters_smg +
                                             [h, Omega_cdm] +
                                             self.parameters_2_smg)
                self.cosmo.struct_cleanup()
                self.cosmo.empty()
                continue

            #except CosmoGuessingError, e:
            #    print "CosmoGuessing!!!"
            #    print e
            #    if "root must be bracketed in zriddr." in str(e):
            #        self.data_guess_error.append(self.parameters_smg + [h, Omega_cdm]
            #                                    + self.parameters_2_smg)
            #    self.cosmo.struct_cleanup()
            #    self.cosmo.empty()
            #    continue

            self.data.append(
                [self.cosmo.w0_smg(), self.cosmo.wa_smg()] +
                self.parameters_smg + [h, Omega_cdm] + self.parameters_2_smg)

            self.cosmo.struct_cleanup()
            self.cosmo.empty()
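
# Usage sketch (assumed from the hooks referenced above; names and values are
# illustrative): a concrete subclass supplies model_params_names,
# compute_cosmological_params() and compute_parameters(), then calls
# compute_data() to accumulate successful CLASS runs in self.data.
#
#   class MyTheory(Theory):
#       def __init__(self):
#           super().__init__()
#           self.update_model_params_names(["param_1"])
#       def compute_cosmological_params(self):
#           return 0.7, 0.25  # hypothetical draw of h, Omega_cdm
#       def compute_parameters(self):
#           self.parameters_smg = [1.0]
#
#   theory = MyTheory()
#   theory.compute_data(points=10)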
Example #43
0
                background_Om_scf = background['(.)Omega_scf'] # read Omega_scf from the background table
                background_z_at_tau = interp1d(background_tau,background_z)
                phi_enveloppe = Theta_initial[j]*10**cosmo.log10_f_axion()*(2/(10**ac[i]))**(-3.*(1+wn)/2/n_axion) ##evaluated today
                delta_max = np.sqrt(delta_phi_scf*delta_phi_scf*A_s*(k/K_star)**(n_s-1))/(phi_enveloppe)
                print(max(delta_max), np.where(delta_max == max(delta_max)))
                k_res_numerical = k[np.where(delta_max == max(delta_max))]*h ##evaluated today
                print("k_res analytical %f Mpc-1, k_res numerical %f Mpc-1" % (k_res_analytical, k_res_numerical))
            except CosmoComputationError: # this happens when CLASS fails
                print "bug!"
                k_res_numerical = 0
                k_nl_scf[i][j] = 100 #arbitrary large number: bug
                z_nl_scf[i][j] = 0 #arbitrary large number: bug
                Omega_nl_scf[i][j] = 1e-30 #arbitrary small number

            ## 2nd iteration: find z_nl at which k_res becomes non-linear
            cosmo.empty()
            cosmo.struct_cleanup()
            if k_res_numerical != 0:
                print(k_res_numerical)
                params={'scf_potential': 'axion',
                'n_axion': n_axion,
                'scf_parameters':'%.2f,0.0'%(Theta_initial[j]),
                'log10_axion_ac':ac[i],
                'log10_fraction_axion_ac': -1, # Must input log10(fraction_axion_ac)
                'adptative_stepsize': 100,
                'scf_tuning_index': 0,
                'do_shooting': 'yes',
                'do_shooting_scf': 'yes',
                'h':h,
                'omega_b':0.02225,
                'omega_cdm':0.1198,
Example #44
0
class Model():
    def __init__(self, cosmo=None):
        """
        Initialize the Model class. By default Model uses its own Class
        instance.

        cosmo = external Class instance. Default is None
        """
        if cosmo:
            self.cosmo = cosmo
        else:
            self.cosmo = Class()
        self.computed = {}
        self.texnames = {}
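
    # Minimal construction sketch (illustrative):
    #   model = Model()                       # uses its own classy.Class()
    #   model = Model(cosmo=my_class_object)  # or wrap an existing instance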

    def __set_scale(self, axes, xscale, yscale):
        """
        Set scales for axes in axes array.

        axes = axes array (e.g. f, ax = plt.subplots(2,2))
        xscale = flat list of x-axis scales, one per subplot.
        yscale = flat list of y-axis scales, one per subplot.

        Scales are set after the axes array is flattened. Plots are counted
        from left to right and from top to bottom.
        """
        for i, ax in enumerate(axes.flat):
            ax.set_xscale(xscale[i])
            ax.set_yscale(yscale[i])

    def __set_label(self, axes, xlabel, ylabel):
        """
        Set labels for axes in axes array.

        axes = axes array (e.g. f, ax = plt.subplots(2,2))
        xlabel = flat list of x-axis labels, one per subplot.
        ylabel = flat list of y-axis labels, one per subplot.

        Labels are set after the axes array is flattened. Plots are counted
        from left to right and from top to bottom.
        """
        for i, ax in enumerate(axes.flat):
            ax.set_xlabel(xlabel[i])
            ax.set_ylabel(ylabel[i])

    def __store_cl(self, cl_dic):
        """
        Store cl's as (l*(l+1)/2pi)*cl, which is much more useful.
        """

        ell = cl_dic['ell'][2:]

        for cl, list_val in cl_dic.items():
            list_val = list_val[2:]  # remove the first two null items (l=0, 1)
            if (list_val == ell).all():
                # the 'ell' array itself is stored without rescaling
                cl_dic[cl] = list_val
                continue
            list_val = (ell * (ell + 1) / (2 * np.pi)) * list_val
            cl_dic[cl] = list_val

        return cl_dic

    def add_derived(self, varied_name, keys, value):
        """
        Add a derived parameter for varied_name dictionary.

        varied_name = varied variable's name.
        keys = list of keys in descending level.
        value = value to store for new dictionary key.
        """

        dic = self.computed[varied_name]

        for key in keys:
            if key not in dic:
                dic[key] = {}

            dic = dic[key]

        dic.update(value)
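
    # Hypothetical example: after computing models for varied_name='w0',
    #   model.add_derived('w0', ['derived'], {'sigma8': 0.81})
    # stores model.computed['w0']['derived']['sigma8'] = 0.81 (value made up).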

    def compute_models(self, params, varied_name, index_variable, values,
                       back=[], thermo=[], prim=[], pert=[], trans=[],
                       pk=[0.0001, 0.1, 100], extra=[], update=True,
                       cosmo_msg=False, texname=""):
        """
        Fill dic with the hi_class output structures for the model with given
        params, modifying the varied_name value with values.

        params = parameters to be set in Class. They must be in agreement with
                what is asked for.
        varied_name = the name of the variable you are modifying. It will be
                      used as key in dic assigned to its background structures.
        index_variable = variable's index in parameters_smg array.
        values = varied variable values you want to compute the cosmology for.
        back = list of variables to store from background. If 'all', store the
              whole dictionary.
        thermo = list of variables to store from thermodynamics. If 'all',
                  store the whole dictionary.
        prim = list of variables to store from primordial. If 'all', store the
               whole dictionary.
        pert = list of variables to store from perturbations. If 'all', store
               the whole dictionary.
        trans = list of variables to store from transfer. If 'all', store
                the whole dictionary. get_transfer accept two optional
                arguments: z=0 and output_format='class' (available options are
                'class' or 'camb'). If different values are desired, first
                item of trans must be {'z': value, 'output_format': value}.
        pk = list with the minimum and maximum k values to store the present
             matter power spectrum and the number of points [k_min, k_max,
             number_points]. Default: [1e-4, 0.1, 100].
        extra = list of any of the method or objects defined in cosmo, e.g.
                w0_smg().  It will store {'method': cosmo.w0_smg()}
        update = if True, update the old computed[key] dictionary; otherwise
                 create a new one. Default: True.
        cosmo_msg = if True, print cosmo.compute() messages. Default: False.
        """

        key = varied_name

        if texname:
            self.set_texnames({varied_name: texname})
        elif key not in self.texnames:  # texname will not be set at this stage. No check required
            self.set_texnames({varied_name: varied_name})

        if (not update) or (key not in self.computed.keys()):
            self.computed[key] = od()

        for val in values:
            # key = "{}={}".format(varied_name, val)
            params["parameters_smg"] = inip.vary_params(params["parameters_smg"], [[index_variable, val]])

            # This could be placed after the try block so that empty
            # dictionaries are not stored. Nevertheless, keeping them is useful
            # to track the failed models and, perhaps, to implement a method to
            # recover them with Omega_smg_debug.
            d = self.computed[key][val] = {}

            self.cosmo.empty()
            self.cosmo.set(params)

            try:
                self.cosmo.compute()
            except Exception as e:
                print("Error: skipping {}={}".format(key, val))
                if cosmo_msg:
                    print(e)

                continue

            d['tunned'] = self.cosmo.get_current_derived_parameters(['tuning_parameter'])['tuning_parameter']

            for lst in [[back, 'back', self.cosmo.get_background],
                        [thermo, 'thermo', self.cosmo.get_thermodynamics],
                        [prim, 'prim', self.cosmo.get_primordial]]:
                if lst[0]:
                    output = lst[2]()
                    if lst[0][0] == 'all':
                        d[lst[1]] = output
                    else:
                        d[lst[1]] = {}
                        for item in lst[0]:  # iterate over the requested list (back/thermo/prim)
                            if type(item) is list:
                                d[lst[1]].update({item[0]: output[item[0]][item[1]]})
                            else:
                                d[lst[1]].update({item: output[item]})

            if pert:
                # Perturbation is tricky because it can accept two optional
                # argument for get_perturbations and this method returns a
                # dictionary {'kind_of_pert': [{variable: list_values}]}, where
                # each item in the list is for a k (chosen in params).
                if type(pert[0]) is dict:
                    output = self.cosmo.get_perturbations(pert[0]['z'], pert[0]['output_format'])
                    if pert[1] == 'all':
                        d['pert'] = output
                else:
                    output = self.cosmo.get_perturbations()
                    if pert[0] == 'all':
                        d['pert'] = output

                if (type(pert[0]) is not dict) and (pert[0] != 'all'):
                    d['pert'] = {}
                    for subkey, lst in output.items():
                        d['pert'].update({subkey: []})
                        for n, kdic in enumerate(lst):  # Each item is for a k
                            d['pert'][subkey].append({})
                            for item in pert:
                                if type(item) is list:
                                    d['pert'][subkey][n].update({item[0]: kdic[item[0]][item[1]]})
                                else:
                                    d['pert'][subkey][n].update({item: kdic[item]})

            for i in extra:
                # i is e.g. 'w0_smg()': evaluate it on the cosmo instance and
                # store the result under that name
                exec('d[i] = self.cosmo.{}'.format(i))

            try:
                d['cl'] = self.__store_cl(self.cosmo.raw_cl())
            except CosmoSevereError:
                pass

            try:
                d['lcl'] = self.__store_cl(self.cosmo.lensed_cl())
            except CosmoSevereError:
                pass

            try:
                d['dcl'] = self.cosmo.density_cl()
            except CosmoSevereError:
                pass


            if ("output" in self.cosmo.pars) and ('mPk' in self.cosmo.pars['output']):
                k_array = np.linspace(*pk)
                pk_array = np.array([self.cosmo.pk(k, 0) for k in k_array])

                d['pk'] = {'k': k_array, 'pk': pk_array}

            self.cosmo.struct_cleanup()