Example #1
import time

from classy import Class


def run_class(parameters, gettransfer):
    '''
    Run CLASS with the input parameters and return the perturbations and
    the value of tau_0 (which should be fixed, but is returned anyway as a
    check), plus the earliest transfer functions if requested. Print the
    time taken by the run.

    Args:
    parameters: dict of parameters to pass to CLASS
    gettransfer (boolean): whether to also return the earliest transfer functions

    Return: (pts, tau_0) if gettransfer=False, and (pts, tau_0, transfer) otherwise
    '''

    start_time = time.time()

    cosmo = Class()
    cosmo.set(parameters)
    cosmo.compute()

    pts = cosmo.get_perturbations()['scalar']
    tau_0 = cosmo.get_current_derived_parameters(['conformal_age'])['conformal_age']
    
    print("--- %s seconds ---" % (time.time() - start_time))
    
    # 45999 is the largest redshift possible 
    if (gettransfer): 
        tf = cosmo.get_transfer(45999)
        return pts, tau_0, tf 
    
    return pts, tau_0
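A minimal usage sketch (not part of the original snippet): the parameter values are illustrative assumptions, and k_output_values is included because classy only fills get_perturbations() when it is set.

params = {
    'output': 'mPk',
    'k_output_values': '0.01, 0.1',  # needed so get_perturbations() has data
    'h': 0.67, 'omega_b': 0.022, 'omega_cdm': 0.12,  # illustrative values
}
pts, tau_0 = run_class(params, gettransfer=False)
print("conformal age tau_0 = %.1f Mpc" % tau_0)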
Example #2
def test_class_setup():
    cosmology = astropy.cosmology.Planck13
    assert cosmology.Om0 == cosmology.Odm0 + cosmology.Ob0
    assert 1 == (cosmology.Om0 + cosmology.Ode0 + cosmology.Ok0 +
                 cosmology.Ogamma0 + cosmology.Onu0)
    class_parameters = get_class_parameters(cosmology)
    try:
        from classy import Class
        cosmo = Class()
        cosmo.set(class_parameters)
        cosmo.compute()
        assert cosmo.h() == cosmology.h
        assert cosmo.T_cmb() == cosmology.Tcmb0.value
        assert cosmo.Omega_b() == cosmology.Ob0
        # Calculate Omega(CDM)_0 two ways:
        assert abs((cosmo.Omega_m() - cosmo.Omega_b()) -
                   (cosmology.Odm0 - cosmology.Onu0)) < 1e-8
        assert abs(cosmo.Omega_m() - (cosmology.Om0 - cosmology.Onu0)) < 1e-8
        # CLASS calculates Omega_Lambda itself so this is a non-trivial test.
        calculated_Ode0 = cosmo.get_current_derived_parameters(
            ['Omega_Lambda'])['Omega_Lambda']
        assert abs(calculated_Ode0 - (cosmology.Ode0 + cosmology.Onu0)) < 1e-5
        cosmo.struct_cleanup()
        cosmo.empty()
    except ImportError:
        pass
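get_class_parameters is defined elsewhere in the original test module. As a rough illustration only (an assumption, not the author's implementation), a helper of that kind might translate the astropy cosmology into CLASS inputs like this:

def get_class_parameters_sketch(cosmology):
    # Hypothetical stand-in, named differently so it is not confused with the
    # real get_class_parameters used in the test above.
    return {
        'H0': 100.0 * cosmology.h,
        'T_cmb': cosmology.Tcmb0.value,
        'omega_b': cosmology.Ob0 * cosmology.h ** 2,
        'omega_cdm': (cosmology.Odm0 - cosmology.Onu0) * cosmology.h ** 2,
        'Omega_k': cosmology.Ok0,
    }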
Example #3
    def calculate_spectra(self, cosmo_params, force_recalc=False):
        settings = cosmo_params.copy()
        settings.update({
            "output": "tCl,mPk",
            "evolver": "1",
            "gauge": "newtonian",
            "P_k_max_1/Mpc": 10,
            })

        database = Database(config.DATABASE_DIR, "spectra.dat")

        if settings in database and not force_recalc:
            data = database[settings]
            ell = data["ell"]
            tt = data["tt"]
            kh = data["kh"]
            Pkh = data["Pkh"]
            self.z_rec = data["z_rec"]
        else:
            cosmo = Class()
            cosmo.set(settings)
            cosmo.compute()
            # Cl's
            data = cosmo.raw_cl()
            ell = data["ell"]
            tt = data["tt"]
            # Matter spectrum
            k = np.logspace(-3, 1, config.MATTER_SPECTRUM_CLIENT_SAMPLES_PER_DECADE * 4)
            Pk = np.vectorize(cosmo.pk)(k, 0)
            kh = k * cosmo.h()
            Pkh = Pk / cosmo.h()**3
            # Get redshift of decoupling
            z_rec = cosmo.get_current_derived_parameters(['z_rec'])['z_rec']
            self.z_rec = z_rec
            # Store to database in the same h-units that are returned below,
            # so that cached and freshly computed results agree
            database[settings] = {
                "ell": ell,
                "tt": tt,
                "kh": kh,
                "Pkh": Pkh,
                "z_rec": z_rec,
            }

        return ClSpectrum(ell[2:], tt[2:]), PkSpectrum(kh, Pkh)
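Database and config are project-specific helpers not shown in this excerpt. A stripped-down sketch of the same classy calls without the caching layer (the cosmological parameter values are illustrative assumptions):

import numpy as np
from classy import Class

cosmo = Class()
cosmo.set({
    "output": "tCl,mPk",
    "P_k_max_1/Mpc": 10,
    "h": 0.67, "omega_b": 0.022, "omega_cdm": 0.12,  # illustrative values
})
cosmo.compute()

cls = cosmo.raw_cl()                    # dict with 'ell', 'tt', ...
k = np.logspace(-3, 1, 200)             # wavenumbers in 1/Mpc
Pk = np.vectorize(cosmo.pk)(k, 0.0)     # P(k, z=0) in Mpc^3
z_rec = cosmo.get_current_derived_parameters(['z_rec'])['z_rec']

cosmo.struct_cleanup()
cosmo.empty()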
Example #4
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """
    
    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self, **kwargs):

        self.model.set(**kwargs)
        self.model.compute()

        # l_max_scalars and T_cmb are assumed to be among the input kwargs
        # (as in the fuller variant of this plugin later in this collection);
        # otherwise fall back to common defaults.
        l_max_scalar = kwargs.get('l_max_scalars', 3000)
        Tcmb = kwargs.get('T_cmb', 2.7255)

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
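The dictionary comprehension above converts the dimensionless lensed C_l's into D_l = l(l+1)C_l/(2 pi) in muK^2. A standalone sketch of the same conversion using classy directly (output settings and l_max are illustrative assumptions):

from numpy import arange, pi
from classy import Class

model = Class()
model.set({'output': 'tCl pCl lCl', 'lensing': 'yes',
           'l_max_scalars': 2500})       # illustrative settings
model.compute()

Tcmb = model.T_cmb()                     # CMB temperature in K
cl = model.lensed_cl(2500)               # dimensionless C_l's
ell = arange(2501)
Dl_TT_muK2 = cl['tt'] * (Tcmb * 1e6)**2 * ell * (ell + 1) / (2 * pi)

model.struct_cleanup()
model.empty()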
Example #5
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk,
    etc.)

    """

    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()

        self.verbose = {
            "input_verbose": 1,
            "background_verbose": 1,
            "thermodynamics_verbose": 1,
            "perturbations_verbose": 1,
            "transfer_verbose": 1,
            "primordial_verbose": 1,
            "spectra_verbose": 1,
            "nonlinear_verbose": 1,
            "lensing_verbose": 1,
            "output_verbose": 1,
        }
        self.scenario = {"lensing": "yes"}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        del self.scenario

    @parameterized.expand(
        itertools.product(
            ("LCDM", "Mnu", "Positive_Omega_k", "Negative_Omega_k", "Isocurvature_modes"),
            (
                {"output": ""},
                {"output": "mPk"},
                {"output": "tCl"},
                {"output": "tCl pCl lCl"},
                {"output": "mPk tCl lCl", "P_k_max_h/Mpc": 10},
                {"output": "nCl sCl"},
                {"output": "tCl pCl lCl nCl sCl"},
            ),
            ({"gauge": "newtonian"}, {"gauge": "sync"}),
            ({}, {"non linear": "halofit"}),
        )
    )
    def test_wrapper_implementation(self, name, scenario, gauge, nonlinear):
        """Create a few instances based on different cosmologies"""
        if name == "Mnu":
            self.scenario.update({"N_ncdm": 1, "m_ncdm": 0.06})
        elif name == "Positive_Omega_k":
            self.scenario.update({"Omega_k": 0.01})
        elif name == "Negative_Omega_k":
            self.scenario.update({"Omega_k": -0.01})
        elif name == "Isocurvature_modes":
            self.scenario.update({"ic": "ad,nid,cdi", "c_ad_cdi": -0.5})

        self.scenario.update(scenario)
        if scenario != {}:
            self.scenario.update(gauge)
        self.scenario.update(nonlinear)

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test case %s |\n" % name)
        sys.stderr.write("---------------------------------\n")
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set(dict(self.verbose, **self.scenario))
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_list = ["tCl", "lCl", "pCl", "nCl", "sCl"]

        # Depending on the cases, the compute should fail or not
        should_fail = True
        output = self.scenario["output"].split()
        for elem in output:
            if elem in ["tCl", "pCl"]:
                for elem2 in output:
                    if elem2 == "lCl":
                        should_fail = False
                        break

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(self.cosmo.state, "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print "--> Class is ready"
        # Depending
        if "output" in self.scenario.keys():
            # Positive tests
            output = self.scenario["output"]
            for elem in output.split():
                if elem in cl_list:
                    print "--> testing raw_cl function"
                    cl = self.cosmo.raw_cl(100)
                    self.assertIsNotNone(cl, "raw_cl returned nothing")
                    self.assertEqual(np.shape(cl["tt"])[0], 101, "raw_cl returned wrong size")
                if elem == "mPk":
                    print "--> testing pk function"
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_list for elem in output.split()]):
                print "--> testing absence of any Cl"
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if "mPk" not in self.scenario["output"].split():
                print "--> testing absence of mPk"
                # args = (0.1, 0)
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

    @parameterized.expand(
        itertools.product(("massless", "massive", "both"), ("photons", "massless", "exact"), ("t", "s, t"))
    )
    def test_tensors(self, scenario, method, modes):
        """Test the new tensor mode implementation"""
        self.scenario = {}
        if scenario == "massless":
            self.scenario.update({"N_eff": 3.046, "N_ncdm": 0})
        elif scenario == "massiv":
            self.scenario.update({"N_eff": 0, "N_ncdm": 2, "m_ncdm": "0.03, 0.04", "deg_ncdm": "2, 1"})
        elif scenario == "both":
            self.scenario.update({"N_eff": 1.5, "N_ncdm": 2, "m_ncdm": "0.03, 0.04", "deg_ncdm": "1, 0.5"})

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test case: %s %s %s |\n" % (scenario, method, modes))
        sys.stderr.write("---------------------------------\n")
        self.scenario.update({"tensor method": method, "modes": modes, "output": "tCl, pCl"})
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")
        self.cosmo.set(dict(self.verbose, **self.scenario))
        self.cosmo.compute()

    @parameterized.expand(list(zip(powerset(["100*theta_s", "Omega_dcdmdr"]), powerset([1.04, 0.20]))))
    def test_shooting_method(self, variables, values):
        Omega_cdm = 0.25

        scenario = {"Omega_b": 0.05}

        for variable, value in zip(variables, values):
            scenario.update({variable: value})

        if "Omega_dcdmdr" in variables:
            scenario.update({"Gamma_dcdm": 100, "Omega_cdm": Omega_cdm - scenario["Omega_dcdmdr"]})
        else:
            scenario.update({"Omega_cdm": Omega_cdm})

        sys.stderr.write("\n\n---------------------------------\n")
        sys.stderr.write("| Test shooting: %s |\n" % (", ".join(variables)))
        sys.stderr.write("---------------------------------\n")
        for key, value in scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        scenario.update(self.verbose)
        self.assertTrue(self.cosmo.set(scenario), "Class failed to initialise with this input")
        self.cosmo.compute()

        # Now, check that the values are properly extracted
        for variable, value in zip(variables, values):
            if variable == "100*theta_s":
                computed_value = self.cosmo.get_current_derived_parameters([variable])[variable]
                self.assertAlmostEqual(value, computed_value, places=5)
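powerset is imported from elsewhere in the original test module; the standard itertools recipe of the same name (assumed here to be what is meant) is:

from itertools import chain, combinations

def powerset(iterable):
    # powerset([1, 2, 3]) --> () (1,) (2,) (3,) (1, 2) (1, 3) (2, 3) (1, 2, 3)
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))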
Example #6
                   'P_k_max_1/Mpc':P_k_max_inv_Mpc,
                   'compute damping scale':'yes', # needed to output and plot Silk damping scale
                   'gauge':'newtonian'}

###############
#
# call CLASS
#
###############
M = Class()
M.set(common_settings)
M.compute()
#
# define conformal time sampling array
#
times = M.get_current_derived_parameters(['tau_rec','conformal_age'])
tau_rec=times['tau_rec']
tau_0 = times['conformal_age']
tau1 = np.logspace(math.log10(tau_ini),math.log10(tau_rec),tau_num_early)
tau2 = np.logspace(math.log10(tau_rec),math.log10(tau_0),tau_num_late)[1:]
tau2[-1] *= 0.999 # this tiny shift avoids interpolation errors
tau = np.concatenate((tau1,tau2))
tau_num = len(tau)
#
# use tables of background and thermodynamics quantities to define some functions
# returning some characteristic scales
# (of Hubble crossing, sound horizon crossing, etc.) at different times
#
background = M.get_background() # load background table
#print background.viewkeys()
thermodynamics = M.get_thermodynamics() # load thermodynamics table
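A sketch of how those tables can be turned into interpolating functions on the conformal-time sampling defined above (the column names are those returned by recent classy versions and should be treated as assumptions for other versions):

from scipy.interpolate import interp1d

# interpolate a few background/thermodynamics columns against conformal time
H_of_tau = interp1d(background['conf. time [Mpc]'], background['H [1/Mpc]'])
z_of_tau = interp1d(background['conf. time [Mpc]'], background['z'])
g_of_tau = interp1d(thermodynamics['conf. time [Mpc]'], thermodynamics['g [Mpc^-1]'])

# evaluate them on the tau sampling array built earlier
H_sampled = H_of_tau(tau)
g_sampled = g_of_tau(tau)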
Example #7
class classy(Theory):
    def initialise(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # If path not given, try using general path to modules
        path_to_installation = get_path_to_installation()
        if not self.path and path_to_installation:
            self.path = os.path.join(path_to_installation, "code",
                                     classy_repo_rename)
        if self.path:
            self.log.info("Importing *local* classy from " + self.path)
            classy_build_path = os.path.join(self.path, "python", "build")
            post = next(d for d in os.listdir(classy_build_path)
                        if d.startswith("lib."))
            classy_build_path = os.path.join(classy_build_path, post)
            if not os.path.exists(classy_build_path):
                self.log.error(
                    "Either CLASS is not in the given folder, "
                    "'%s', or you have not compiled it.", self.path)
                raise HandledException
            # Inserting the previously found path into the list of import folders
            sys.path.insert(0, classy_build_path)
        else:
            self.log.info("Importing *global* CLASS.")
        # Propagate errors up (the global declaration must precede the import
        # below for this to be valid in Python 3)
        global CosmoComputationError, CosmoSevereError
        try:
            from classy import Class, CosmoSevereError, CosmoComputationError
        except ImportError:
            self.log.error(
                "Couldn't find the CLASS python interface. "
                "Make sure that you have compiled it, and that you either\n"
                " (a) specify a path (you didn't) or\n"
                " (b) install the Python interface globally with\n"
                "     '/path/to/class/python/python setup.py install --user'")
            raise HandledException
        self.classy = Class()
        # Generate states, to avoid recomputing
        self.n_states = 3
        self.states = [{
            "params": None,
            "derived": None,
            "derived_extra": None,
            "last": 0
        } for i in range(self.n_states)]
        # Dict of named tuples to collect requirements and computation methods
        self.collectors = {}
        # Additional input parameters to pass to CLASS
        self.extra_args = self.extra_args or {}
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (os.path.join(
                self.path, self.extra_args["sBBN file"]))
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []

    def current_state(self):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        return self.states[lasts.index(max(lasts))]

    def needs(self, arguments):
        # Computed quantities required by the likelihood
        arguments = arguments or {}
        for k, v in arguments.items():
            # Precision parameters and boundaries (in general, take max of all requested)
            if k == "l_max":
                self.extra_args["l_max_scalars"] = (max(
                    v, self.extra_args.get("l_max_scalars", 0)))
            elif k == "k_max":
                self.extra_args["P_k_max_h/Mpc"] = (max(
                    v, self.extra_args.get("P_k_max_h/Mpc", 0)))
            # Products and other computations
            elif k == "Cl":
                if any([("t" in cl.lower()) for cl in v]):
                    self.extra_args["output"] += " tCl"
                if any([(("e" in cl.lower()) or ("b" in cl.lower()))
                        for cl in v]):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                self.extra_args["non linear"] = "halofit"
                self.collectors[k] = collector(method="lensed_cl", kwargs={})
                self.collectors["TCMB"] = collector(method="T_cmb", kwargs={})
            elif k == "fsigma8":
                self.collectors["growth_factor_f"] = collector(
                    method="scale_independent_growth_factor_f",
                    args=[np.atleast_1d(v["redshifts"])],
                    arg_array=0)
                self.collectors["sigma8"] = collector(
                    method="sigma",
                    # Notice: Needs H0 for 1st arg (R), so added later
                    args=[None, np.atleast_1d(v["redshifts"])],
                    arg_array=1)
                if "H0" not in self.input_params:
                    self.derived_extra += ["H0"]
                self.extra_args["output"] += " mPk"
                self.extra_args["P_k_max_h/Mpc"] = (max(
                    1, self.extra_args.get("P_k_max_h/Mpc", 0)))
                self.add_z_for_matter_power(v["redshifts"])
            elif k == "h_of_z":
                self.collectors[k] = collector(
                    method="Hubble",
                    args=[np.atleast_1d(v["redshifts"])],
                    arg_array=0)
                self.H_units_conv_factor = {
                    "/Mpc": 1,
                    "km/s/Mpc": _c
                }[v["units"]]
            elif k == "angular_diameter_distance":
                self.collectors[k] = collector(
                    method="angular_distance",
                    args=[np.atleast_1d(v["redshifts"])],
                    arg_array=0)
            else:
                # Extra derived parameters
                if v is None:
                    self.derived_extra += [self.translate_param(k)]
                else:
                    self.log.error("Unknown required product: '%s:%s'.", k, v)
                    raise HandledException
        # Derived parameters (if some need some additional computations)
        if "sigma8" in self.output_params or arguments:
            self.extra_args["output"] += " mPk"
            self.extra_args["P_k_max_h/Mpc"] = (max(
                1, self.extra_args.get("P_k_max_h/Mpc", 0)))
        # Since the Cl collector needs lmax, update it now, in case it has increased
        # *after* declaring the Cl collector
        if "Cl" in self.collectors:
            self.collectors["Cl"].kwargs["lmax"] = self.extra_args[
                "l_max_scalars"]
        # Cleanup of products string
        self.extra_args["output"] = " ".join(
            set(self.extra_args["output"].split()))

    def add_z_for_matter_power(self, z):
        if not hasattr(self, "z_for_matter_power"):
            self.z_for_matter_power = np.empty((0))
        self.z_for_matter_power = np.flip(np.sort(
            np.unique(
                np.concatenate([self.z_for_matter_power,
                                np.atleast_1d(z)]))),
                                          axis=0)
        self.extra_args["z_pk"] = " ".join(
            ["%g" % zi for zi in self.z_for_matter_power])

    def translate_param(self, p):
        if self.use_planck_names:
            return self.planck_to_classy.get(p, p)
        return p

    def set(self, params_values_dict, i_state):
        # Store them, to use them later to identify the state
        self.states[i_state]["params"] = deepcopy(params_values_dict)
        # Prepare parameters to be passed: this-iteration + extra
        args = {
            self.translate_param(p): v
            for p, v in params_values_dict.items()
        }
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.struct_cleanup()
        self.classy.set(**args)

    def compute(self, derived=None, **params_values_dict):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        try:
            # are the parameter values there already?
            i_state = next(i for i in range(self.n_states)
                           if self.states[i]["params"] == params_values_dict)
            # Get (pre-computed) derived parameters
            if derived == {}:
                derived.update(self.states[i_state]["derived"])
            self.log.debug("Re-using computed results (state %d)", i_state)
        except StopIteration:
            # update the (first) oldest one and compute
            i_state = lasts.index(min(lasts))
            self.log.debug("Computing (state %d)", i_state)
            # Set parameters
            self.set(params_values_dict, i_state)
            # Compute!
            try:
                self.classy.compute()
            # "Valid" failure of CLASS: parameters too extreme -> log and report
            except CosmoComputationError:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on.")
                return False
            # CLASS not correctly initialised, or input parameters not correct
            except CosmoSevereError:
                self.log.error(
                    "Serious error setting parameters or computing results. "
                    "The parameters passed were %r and %r. "
                    "See original CLASS's error traceback below.\n",
                    self.states[i_state]["params"], self.extra_args)
                raise  # No HandledException, so that CLASS traceback gets printed
            # Gather products
            for product, collector in self.collectors.items():
                # Special case: sigma8 needs H0, which cannot be known beforehand:
                if "sigma8" in self.collectors:
                    self.collectors["sigma8"].args[0] = 8 / self.classy.h()
                method = getattr(self.classy, collector.method)
                if self.collectors[product].arg_array is None:
                    self.states[i_state][product] = method(
                        *self.collectors[product].args,
                        **self.collectors[product].kwargs)
                else:
                    i_array = self.collectors[product].arg_array
                    self.states[i_state][product] = np.zeros(
                        len(self.collectors[product].args[i_array]))
                    for i, v in enumerate(
                            self.collectors[product].args[i_array]):
                        args = (
                            list(self.collectors[product].args[:i_array]) +
                            [v] +
                            list(self.collectors[product].args[i_array + 1:]))
                        self.states[i_state][product][i] = method(
                            *args, **self.collectors[product].kwargs)
            # Prepare derived parameters
            d, d_extra = self.get_derived_all(
                derived_requested=(derived == {}))
            derived.update(d)
            self.states[i_state]["derived"] = odict(
                [[p, derived.get(p)] for p in self.output_params])
            # Prepare necessary extra derived parameters
            self.states[i_state]["derived_extra"] = deepcopy(d_extra)
        # make this one the current one by decreasing the antiquity of the rest
        for i in range(self.n_states):
            self.states[i]["last"] -= max(lasts)
        self.states[i_state]["last"] = 1
        return True

    def get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``compute`` method).

        To get a parameter *from a likelihood* use `get_param` instead.
        """
        list_requested_derived = self.output_params if derived_requested else []
        de_translated = {
            self.translate_param(p): p
            for p in list_requested_derived
        }
        requested_derived_with_extra = list(de_translated.keys()) + list(
            self.derived_extra)
        derived_aux = {}
        # Exceptions
        if "rs_drag" in requested_derived_with_extra:
            requested_derived_with_extra.remove("rs_drag")
            derived_aux["rs_drag"] = self.classy.rs_drag()
        derived_aux.update(
            self.classy.get_current_derived_parameters(
                requested_derived_with_extra))
        # Fill return dictionaries
        derived = {
            de_translated[p]: derived_aux[self.translate_param(p)]
            for p in de_translated
        }
        derived_extra = {p: derived_aux[p] for p in self.derived_extra}
        # No need for error control: classy.get_current_derived_parameters is passed
        # every derived parameter not excluded before, and raises an error if it finds
        # a parameter that it does not recognise
        return derived, derived_extra

    def get_param(self, p):
        """
        Interface function for likelihoods to get sampled and derived parameters.
        """
        current_state = self.current_state()
        for pool in ["params", "derived", "derived_extra"]:
            value = current_state[pool].get(self.translate_param(p), None)
            if value is not None:
                return value
        self.log.error("Parameter not known: '%s'", p)
        raise HandledException

    def get_cl(self, ell_factor=False):
        """
        Returns the power spectra in microK^2
        (unitless for lensing potential),
        using the *current* state.
        """
        current_state = self.current_state()
        # get C_l^XX from the cosmological code
        try:
            cl = deepcopy(current_state["Cl"])
        except KeyError:
            self.log.error(
                "No Cl's were computed. Are you sure that you have requested them?"
            )
            raise HandledException
        want_ell_factor = ell_factor
        ell_factor = ((cl["ell"] + 1) * cl["ell"] /
                      (2 * np.pi))[2:] if want_ell_factor else 1
        # convert dimensionless C_l's to C_l in muK**2
        T = current_state["TCMB"]
        for key in cl:
            # All quantities need to be multiplied by this factor, except the
            # phi-phi term, which is already dimensionless
            if key not in ['pp', 'ell']:
                cl[key][2:] *= (T * 1.e6)**2 * ell_factor
        if "pp" in cl and want_ell_factor:
            cl['pp'][2:] *= ell_factor**2 * (2 * np.pi)
        return cl

    def get_fsigma8(self, z):
        indices = np.where(self.z_for_matter_power == z)
        return (self.current_state()["growth_factor_f"][indices] *
                self.current_state()["sigma8"][indices])

    def get_h_of_z(self, z):
        return self.current_state()["h_of_z"][np.where(
            self.collectors["h_of_z"].args[self.collectors["h_of_z"].arg_array]
            == z)] * self.H_units_conv_factor

    def get_angular_diameter_distance(self, z):
        return self.current_state()["angular_diameter_distance"][np.where(
            self.collectors["angular_diameter_distance"].args[
                self.collectors["angular_diameter_distance"].arg_array] == z)]
Example #8
###############################################################################################################################################
log10_axion_ac = -3.55314
n_axion = 3
N = 20
log10_fraction_axion_ac = -0.8788
Theta_initial = np.linspace(1.5, 3, N, endpoint = True)
rs_rec=[]
rd_rec=[]
da_rec=[]
# color=['r','b']
cosmo = Class()
cosmo.set({'h':0.72,
'write thermodynamics':'yes',
'compute damping scale':'yes'})
cosmo.compute() # solve physics
derived = cosmo.get_current_derived_parameters(['z_rec','tau_rec','conformal_age','rs_rec','rd_rec'])
rs_LCDM = int(1000.*derived['rs_rec'])/1000.
rd_LCDM = int(1000.*derived['rd_rec'])/1000.
# label=[r'$\Delta N_{\rm eff} = 0.5$',r'$f_{\rm EDE}(a_c) = 10\%,~a_c = 0.0002$']
for i in range(N):
	print(Theta_initial[i])
	# params['Omega_fld']
	# params['N_ur'] = Eps_x[i]/(A*(1-Eps_x[i]))
	cosmo.set({'scf_potential': 'axion',
    'n_axion': n_axion,
    'log10_axion_ac': log10_axion_ac, # Must input log10(axion_ac)
    # log10_fraction_axion_ac': -1.922767 # Must input log10(fraction_axion_ac)
    'log10_fraction_axion_ac': log10_fraction_axion_ac, # Must input log10(fraction_axion_ac)
    # m_axion': 1.811412e+06
    # f_axion': 1
    'scf_parameters':'%.2f,0.0'%(Theta_initial[i]), #phi_i,phi_dot_i //dummy: phi_i will be updated.
Example #9
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    #{cosmoslik name : class name} - This needs to be done even for variables with the same name (because of for loop in self.model.set)!
    name_mapping = {#'As':'A_s',
                    #'ns':'n_s',
                    #'r':'r',
                    'custom1':'custom1',
                    'custom2':'custom2',
                    'custom3':'custom3',
                    #'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    'omk':'Omega_k',
                    'l_max_scalar':'l_max_scalars',
                    'l_max_tensor':'l_max_tensors',
                    'Tcmb':'T_cmb'
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()

    #def __call__(self,
    #             **kwargs):
    
    #    d={}
     #   for k, v in kwargs.iteritems():
      #      if k in self.name_mapping and v is not None:
       #         d[self.name_mapping[k]]=v
        #    else:
         #       d[k]=v
    
    #def __call__(self,
                 #ombh2,
                 #omch2,
                 #H0,
                 #As,
                 #ns,
                 #custom1,
                 #custom2,
                 #custom3,
                 #tau,
                 #w=None,
                 #r=None,
                 #nrun=None,
                 #omk=0,
                 #Yp=None,
                 #Tcmb=2.7255,
                 #massless_neutrinos=3.046,
                 #l_max_scalar=3000,
                 #l_max_tensor=3000,
                 #pivot_scalar=0.05,
                 #outputs=[],
                 #**kwargs):

        #print kwargs
        
    def __call__(self,**kwargs):
        #print kwargs
        #print kwargs['classparamlist']
        #print kwargs['d']
        
        d={}
        for k,v in kwargs.items():
            if k in kwargs['classparamlist']:
                if k in self.name_mapping and v is not None:
                    d[self.name_mapping[k]]=v
                else:
                    d[k]=v
            
        
        #d['P_k_ini type']='external_Pk'
        #d['modes'] = 's,t'
        self.model.set(**d)
                       
        l_max = d['l_max_scalars']
        Tcmb =  d['T_cmb']
        
        #print l_max

        #print d
        
        self.model.compute()

        ell = arange(l_max+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #10
class Binning():
    def __init__(self, fname, outdir='./'):
        self._cosmo = Class()
        self._fname = fname
        self._outdir = outdir
        self._set_default_values()

    def _set_full_filenames(self, filesuffixes):
        """
        Return a list with the full filenames built from filesuffixes,
        increasing the trailing number in case the files already exist.

        Additionally, set the full filenames of self._fparamsname and
        self._fshootname.
        """
        fullfilenames = []
        for suffix in filesuffixes + ['params', 'shooting']:
            fullfilenames.append(
                os.path.join(self._outdir, self._fname + '-' + suffix))

        i = 0

        bools = [True] * len(fullfilenames)

        while 1:
            for n, f in enumerate(fullfilenames):
                bools[n] = os.path.exists(f + '-%s.txt' % i)

            if True not in bools:
                break

            i += 1

        self._fparamsname = fullfilenames[-2] + '-%s.txt' % i
        self._fshootname = fullfilenames[-1] + '-%s.txt' % i

        return [f + '-%s.txt' % i for f in fullfilenames[:-2]]

    def _set_default_values(self):
        """
        Set default values of the parameter lists.
        """
        self.params_smg = []
        self.params_2_smg = []
        self.h = []
        self.Omega_cdm = []

        self.gravity_model = []

        self._params = {
            "Omega_Lambda": 0,
            "Omega_fld": 0,
            "Omega_smg": -1,
            'output': 'mPk',  #
            'z_max_pk': 1000
        }  # Added for relative errors in f.

        self._computed = False
        self._path = []
        self._binType = ''

    def set_Pade(self,
                 n_num,
                 m_den,
                 xvar='a',
                 xReverse=False,
                 accuracy=1e-3,
                 increase=False,
                 maxfev=0):
        """
        Set which Pade polynomial orders, temporal variable and ordering to use.
        """
        self.reset()
        self._PadeOrder = [n_num, m_den]
        self._Pade_xvar = xvar
        self._Pade_xReverse = xReverse
        self._Pade_maxfev = maxfev
        self._Pade_increase = increase
        self._Pade_accuracy = accuracy
        self._binType = 'Pade'

    def set_fit(self,
                fit_function,
                n_coeffs,
                variable_to_fit,
                fit_function_label='',
                z_max_pk=1000,
                bounds=(-np.inf, np.inf),
                p0=[],
                xvar='ln(1+z)'):
        """
        Set the fitting_function and the number of coefficients.

        variable_to_fit must be one of 'F', 'w', 'logRho', 'logX' or 'X'.

        fit_function_label will be written in the header of fit files.
        """
        self.reset()
        self._fit_function = fit_function
        self._n_coeffs = n_coeffs
        self._list_variables_to_fit = ['F', 'w', 'logRho', 'logX', 'X']
        if variable_to_fit in self._list_variables_to_fit:
            self._variable_to_fit = variable_to_fit
        else:
            raise ValueError('variable_to_fit must be one of {}'.format(
                self._list_variables_to_fit))

        self._fit_function_label = fit_function_label
        self._binType = 'fit'
        self._fFitname = self._set_full_filenames(['fit-' + variable_to_fit
                                                   ])[0]
        self._params.update({'z_max_pk': z_max_pk})
        self._fit_bounds = bounds
        self._p0 = p0
        list_fit_xvar = ['ln(1+z)', 'lna', 'a', '(1-a)']
        if xvar in list_fit_xvar:
            self._fit_xvar = xvar
        else:
            raise ValueError('xvar must be one of {}'.format(list_fit_xvar))

    def set_bins(self, zbins, abins):
        """
        Set which bins to use and reset to avoid confusion.
        """
        self.reset()
        self._zbins = zbins
        self._abins = abins
        self._binType = 'bins'
        self._fwzname, self._fwaname = self._set_full_filenames(
            ['wz-bins', 'wa-bins'])

    def _read_from_file(self, path):
        """
        Return params for class from files used in quintessence Marsh.
        """
        with open(path) as f:
            f.readline()
            header = f.readline().split()[3:]  # Remove '#', 'w0', and 'wa'

        columns = np.loadtxt(path, unpack=True)[2:]  # Remove columns w0, wa

        for index_h, head in enumerate(header):
            if head[-1] == 'h':
                break

        self.params_smg = list(zip(*columns[:index_h]))
        self.params_2_smg = [
            list(row[~np.isnan(row)])
            for row in np.array(list(zip(*columns[index_h + 2:])))
        ]
        self.h = columns[index_h]
        self.Omega_cdm = columns[index_h + 1]

        self.gravity_model = os.path.basename(path).split('-')[0]

    def _params_from_row(self, row):
        """
        Set parameters.
        """
        params = self._params
        params.update({
            'parameters_smg': str(self.params_smg[row]).strip('[()]'),
            'h': self.h[row],
            'Omega_cdm': self.Omega_cdm[row],
            'gravity_model': self.gravity_model
        })

        if len(self.params_2_smg):
            params.update({
                'parameters_2_smg':
                str(self.params_2_smg[row]).strip('[()]')
            })

        return params

    def compute_bins(self, params):
        """
        Compute the w_i bins for the model with params.
        """
        wzbins = np.empty(len(self._zbins))
        wabins = np.empty(len(self._abins))
        self._params = params
        self._cosmo.set(params)
        try:
            self._cosmo.compute()
            for n, z in enumerate(self._zbins):
                wzbins[n] = self._cosmo.w_smg(z)
            for n, a in enumerate(self._abins):
                wabins[n] = self._cosmo.w_smg(1. / a - 1.)
            shoot = self._cosmo.get_current_derived_parameters(
                ['tuning_parameter'])['tuning_parameter']
        except Exception as e:
            self._cosmo.struct_cleanup()
            self._cosmo.empty()
            raise e

        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return wzbins, wabins, shoot

    def _compute_common_init(self, params):
        """
        Common first steps for compute methods
        """
        self._params.update(params)
        self._cosmo.set(self._params)

        try:
            self._cosmo.compute()
            b = self._cosmo.get_background()
            shoot = self._cosmo.get_current_derived_parameters(
                ['tuning_parameter'])['tuning_parameter']
        except Exception as e:
            self._cosmo.struct_cleanup()
            self._cosmo.empty()
            raise e

        return b, shoot

    def _fit(self, X, Y):
        """
        Fits self._fit_function to X, Y, with self._n_coeffs.
        """
        return wicm.fit(self._fit_function,
                        X,
                        Y,
                        self._n_coeffs,
                        bounds=self._fit_bounds,
                        p0=self._p0)

    def _get_fit_xvar_and_log1Plusz(self, z):
        """
        Return the X array to fit and log(1+z)
        """
        if self._fit_xvar == 'ln(1+z)':
            X = np.log(z + 1)
            lna = X
        elif self._fit_xvar == 'lna':
            X = -np.log(z + 1)
            lna = -X
        elif self._fit_xvar == 'a':
            X = 1. / (1 + z)
            lna = np.log(z + 1)
        elif self._fit_xvar == '(1-a)':
            X = (z / (1 + z))
            lna = np.log(z + 1)

        return X, lna

    def compute_fit_coefficients_for_F(self, params):
        """
        Returns the coefficients of the polynomial fit of f(a) = \int w and the
        maximum and minimum residual in absolute value.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact \int dlna a
        ###############################
        z, w = b['z'], b['w_smg']

        Fint = []
        lna = -np.log(1 + z)[::-1]
        for i in lna:
            Fint.append(integrate.trapz(w[::-1][lna >= i], lna[lna >= i]))
        Fint = np.array(Fint)

        #####

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        zTMP = z[z <= zlim]
        Y1 = Fint[::-1][z <= zlim]  # Ordered as in CLASS

        #####################

        # Fit to fit_function
        #####################
        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################

        rhoDE_fit = b['(.)rho_smg'][-1] * np.exp(-3 * yfit1) * (
            1 + zTMP)**3  ###### CHANGE WITH CHANGE OF FITTED THING

        Xw_fit, w_fit = wicm.diff(lna, yfit1)
        w_fit = -interp1d(
            Xw_fit, w_fit, bounds_error=False, fill_value='extrapolate')(lna)

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def compute_fit_coefficients_for_logX(self, params):
        """
        Returns the coefficients of the polynomial fit of log(rho/rho_0) = -3
        \int dlna (w+1) and the maximum and minimum residual in absolute value.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact -3 \int dlna (w + 1)
        ###############################
        z = b['z']

        logX = np.log(b['(.)rho_smg'] / b['(.)rho_smg'][-1])

        #####

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        Y1 = logX[z <= zlim]

        #####################

        # Fit to fit_function
        #####################
        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################

        rhoDE_fit = b['(.)rho_smg'][-1] * np.exp(
            yfit1)  ###### CHANGE WITH CHANGE OF FITTED THING

        Xw_fit, ThreewPlus1 = wicm.diff(lna, yfit1)
        w_fit = ThreewPlus1 / 3. - 1  # The minus sign is taken into account by the CLASS ordering.
        w_fit = interp1d(Xw_fit,
                         w_fit,
                         bounds_error=False,
                         fill_value='extrapolate')(lna)

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def compute_fit_coefficients_for_X(self, params):
        """
        Returns the coefficients of the polynomial fit of rho/rho_0 = exp[-3
        \int dlna (w+1)] and the maximum and minimum residual in absolute value.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact -3 \int dlna (w + 1)
        ###############################
        z = b['z']

        Y = b['(.)rho_smg'] / b['(.)rho_smg'][-1]

        #####

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        Y1 = Y[z <= zlim]

        #####################

        # Fit to fit_function
        #####################
        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################

        rhoDE_fit = b['(.)rho_smg'][
            -1] * yfit1  ###### CHANGE WITH CHANGE OF FITTED THING

        Xw_fit, diff = wicm.diff(lna, yfit1)
        diff = interp1d(Xw_fit,
                        diff,
                        bounds_error=False,
                        fill_value='extrapolate')(lna)
        ThreewPlus1 = diff / yfit1
        w_fit = ThreewPlus1 / 3. - 1  # The minus sign is taken into account by the CLASS ordering.

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def compute_fit_coefficients_for_logRho(self, params):
        """
        Returns the coefficients of the fit of ln(rho_de) and the maximum and
        minimum residual in absolute value.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact log(rho)
        ###############################
        z = b['z']

        logX = np.log(b['(.)rho_smg'])

        #####

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        Y1 = logX[z <= zlim]

        #####################

        # Fit to fit_function
        #####################

        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################

        rhoDE_fit = np.exp(yfit1)  ###### CHANGE WITH CHANGE OF FITTED THING

        Xw_fit, ThreewPlus1 = wicm.diff(lna, yfit1 - yfit1[-1])
        w_fit = ThreewPlus1 / 3. - 1  # The minus sign is taken into account by the CLASS ordering.
        w_fit = interp1d(Xw_fit,
                         w_fit,
                         bounds_error=False,
                         fill_value='extrapolate')(lna)

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def compute_fit_coefficients_for_w(self, params):
        """
        Returns the coefficients of the polynomial fit of f(a) = \int w and the
        maximum and minimum residual in absolute value.
        """
        b, shoot = self._compute_common_init(params)

        # Compute the exact \int dlna a
        ###############################
        z, w = b['z'], b['w_smg']

        zlim = self._params['z_max_pk']
        # Note that lna is log(1+z). I used this name because it is more convenient
        X, lna = self._get_fit_xvar_and_log1Plusz(z[z <= zlim])

        #####################
        zTMP = z[z <= zlim]
        Y1 = w[z <= zlim]

        # Fit to fit_function
        #####################
        popt1, yfit1 = self._fit(X, Y1)

        # Obtain max. rel. dev. for DA and f.
        #####################
        Fint = []
        lna = -np.log(1 + zTMP)[::-1]
        for i in lna:
            Fint.append(integrate.trapz(yfit1[::-1][lna >= i], lna[lna >= i]))
        Fint = np.array(Fint[::-1])

        rhoDE_fit = b['(.)rho_smg'][-1] * np.exp(
            -3 * Fint) * (1 + zTMP)**3  # CHANGE WITH CHANGE OF FITTED THING

        # TODO: needed?
        # Xw_fit, w_fit = X, yfit1
        # w_fit = interp1d(Xw_fit, w_fit, bounds_error=False, fill_value='extrapolate')(X)
        w_fit = yfit1

        DA_reldev, f_reldev = self._compute_maximum_relative_error_DA_f(
            rhoDE_fit, w_fit)

        # Free structures
        ###############
        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        return np.concatenate([popt1, [DA_reldev, f_reldev]]), shoot

    def _compute_maximum_relative_error_DA_f(self, rhoDE_fit, w_fit):
        """
        Return the relative error for the angular diameter distance and the
        growth factor, f.

        rhoDE_fit = array
        wfit = interp1d(w)
        """

        b = self._cosmo.get_background()

        # Compute the exact growth rate
        #####################
        z_max_pk = self._params['z_max_pk']
        zlim = z_max_pk
        z, w = b['z'], b['w_smg']
        zTMP = z[z <= zlim]
        rhoM = (b['(.)rho_b'] + b['(.)rho_cdm'])
        rhoR = (b['(.)rho_g'] + b['(.)rho_ur'])
        DA = b['ang.diam.dist.']

        OmegaDEwF_exact = interp1d(z[z <= z_max_pk],
                                   (b['(.)rho_smg'] / b['(.)rho_crit'] *
                                    w)[z <= z_max_pk])
        OmegaMF = interp1d(z[z <= z_max_pk],
                           (rhoM / b['(.)rho_crit'])[z <= z_max_pk])

        time_boundaries = [z[z <= z_max_pk][0], z[z <= z_max_pk][-1]]

        # Use LSODA integrator as some solutions were wrong with RK45 and OK
        # with this.
        f = integrate.solve_ivp(
            cosmo_extra.fprime(OmegaDEwF_exact, OmegaMF),
            time_boundaries,
            [cosmo_extra.growthrate_at_z(self._cosmo, z_max_pk)],
            method='LSODA',
            dense_output=True)

        # Compute D_A for fitted model
        ################
        H_fit = np.sqrt(rhoM[z <= zlim] + rhoR[z <= zlim] + rhoDE_fit)

        DA_fit = cosmo_extra.angular_distance(z[z <= zlim],
                                              H_fit[zTMP <= zlim])

        # Compute the growth rate for fitted model
        ###############

        OmegaMF_fit = interp1d(
            zTMP, 1 - rhoDE_fit / H_fit**2 - rhoR[z <= zlim] /
            H_fit**2)  ####### THIS FITS OBSERVABLES CORRECTLY
        # OmegaMF_fit = interp1d(zTMP, rhoM[z<=zlim]/H_fit**2)      ####### THIS FITS OBSERVABLES CORRECTLY
        OmegaDEwF_fit = interp1d(zTMP, rhoDE_fit / H_fit**2 * w_fit)

        f_fit = integrate.solve_ivp(
            cosmo_extra.fprime(OmegaDEwF_fit,
                               OmegaMF_fit), [zTMP[0], zTMP[-1]],
            [cosmo_extra.growthrate_at_z(self._cosmo, zTMP[0])],
            method='LSODA',
            dense_output=True)

        # Obtain rel. deviations.
        ################

        # Remove close to 0 points as rel.dev diverges. z = 0.05 is the lowest
        # redshift observed and is done in BOSS survey. arXiv: 1308.4164
        # DA_reldev = max(np.abs(DA_fit[zTMP>=0.04]/DA[ (z>=0.04) & (z<=zlim)] - 1))
        DA_reldev = max(np.abs(DA_fit / DA[z <= zlim] - 1))
        f_reldev = max(np.abs(f_fit.sol(zTMP)[0] / f.sol(zTMP)[0] - 1))

        return DA_reldev, f_reldev

    def compute_Pade_coefficients(self, params):
        """
        Returns the Pade coefficients for w computed from params and the maximum
        and minimum residual in absolute value.
        """
        self._params = params
        self._cosmo.set(params)

        try:
            self._cosmo.compute()
            b = self._cosmo.get_background()
            shoot = self._cosmo.get_current_derived_parameters(
                ['tuning_parameter'])['tuning_parameter']
        except Exception as e:
            self._cosmo.struct_cleanup()
            self._cosmo.empty()
            raise e

        self._cosmo.struct_cleanup()
        self._cosmo.empty()

        xDict = {
            'z': b['z'],
            'z+1': b['z'] + 1,
            'a': 1. / (b['z'] + 1),
            'log(a)': -np.log(b['z'] + 1),
            'log(z+1)': np.log(b['z'] + 1)
        }

        X = xDict[self._Pade_xvar]
        w = b['w_smg']

        if self._Pade_xReverse:
            X = X[::-1]
            w = w[::-1]

        PadeOrder = np.array(self._PadeOrder)

        if not self._Pade_increase:
            reduceOrder = [[0, 0], [1, 0], [0, 1], [2, 0], [2, 1], [3, 1]]
            orderList = PadeOrder - reduceOrder

        else:
            orderList = [[1, 1], [2, 0], [3, 0], [2, 1], [2, 2], [3, 1],
                         [4, 0], [2, 3], [3, 2], [4, 1], [5, 0], [3, 3],
                         [4, 2], [5, 1], [3, 4], [4, 3], [5, 2], [3, 5],
                         [4, 4], [5, 3], [4, 5], [5, 4], [5, 5]]

        r = np.array([np.inf])
        for order in orderList:
            # Increase order of Pade up to [5/5].
            try:
                padeCoefficientsTMP, padeFitTMP = fit_pade(
                    X, w, *order, maxfev=self._Pade_maxfev)
                rTMP = np.abs(padeFitTMP / w - 1.)
                if self._Pade_increase and (np.max(rTMP) >
                                            self._Pade_accuracy):
                    if np.max(rTMP) < np.max(r):
                        padeCoefficients = padeCoefficientsTMP
                        r = rTMP
                    continue
                else:
                    padeCoefficients = padeCoefficientsTMP
                    r = rTMP
                    break
            except Exception as e:
                if (order == orderList[-1]) and (len(r) == 1):
                    raise e

                continue

        zeros = (PadeOrder - order)

        numCoefficients = np.append(padeCoefficients[:order[0] + 1],
                                    [0.] * zeros[0])
        denCoefficients = np.append(padeCoefficients[order[0] + 1:],
                                    [0.] * zeros[1])
        padeCoefficients = np.concatenate([numCoefficients, denCoefficients])

        return np.concatenate([padeCoefficients, [np.min(r),
                                                  np.max(r)]]), shoot

    def compute_bins_from_params(self, params_func, number_of_rows):
        """
        Compute the w_i bins for the models given by the function
        params_func, iterated number_of_rows times.
        """
        self._create_output_files()

        wzbins = []
        wabins = []
        params = []
        shoot = []

        for row in range(number_of_rows):
            sys.stdout.write("{}/{}\n".format(row + 1, number_of_rows))
            params_tmp = params_func().copy()

            try:
                wzbins_tmp, wabins_tmp, shoot_tmp = self.compute_bins(
                    params_tmp)
                wzbins.append(wzbins_tmp)
                wabins.append(wabins_tmp)
                params.append(params_tmp)
                shoot.append(shoot_tmp)
                # Easily generalizable. It could be inputted a list with the
                # desired derived parameters and store the whole dictionary.
            except Exception as e:
                sys.stderr.write(str(self._params) + '\n')
                sys.stderr.write(str(e))
                sys.stderr.write('\n')
                continue

            if len(wzbins) == 5:
                self._save_computed(params, shoot, [wzbins, wabins])

                params = []
                wzbins = []
                wabins = []
                shoot = []

        self._save_computed(params, shoot, [wzbins, wabins])

    def compute_Pade_from_params(self, params_func, number_of_rows):
        """
        Compute the Pade coefficients for the models generated by params_func,
        calling it once per row, number_of_rows times.
        """
        self._create_output_files()

        wbins = []
        params = []
        shoot = []

        for row in range(number_of_rows):
            sys.stdout.write("{}/{}\n".format(row + 1, number_of_rows))
            params_tmp = params_func().copy()

            try:
                wbins_tmp, shoot_tmp = self.compute_Pade_coefficients(
                    params_tmp)
                wbins.append(wbins_tmp)
                params.append(params_tmp)
                shoot.append(shoot_tmp)
                # Easily generalizable: a list of desired derived parameters
                # could be passed in and the whole dictionary stored.
            except Exception as e:
                sys.stderr.write(str(self._params) + '\n')
                sys.stderr.write(str(e))
                sys.stderr.write('\n')
                continue

            if len(wbins) == 5:
                self._save_computed(params, shoot, wbins)

                params = []
                wbins = []
                shoot = []

        self._save_computed(params, shoot, wbins)

    def compute_fit_from_params(self, params_func, number_of_rows):
        """
        Compute the fit coefficients for the models generated by params_func,
        calling it once per row, number_of_rows times.

        The variable to fit is chosen in self.set_fit
        """
        # TODO: If this grows, consider creating a separate method
        if self._variable_to_fit == 'F':
            fit_variable_function = self.compute_fit_coefficients_for_F
        elif self._variable_to_fit == 'w':
            fit_variable_function = self.compute_fit_coefficients_for_w
        elif self._variable_to_fit == 'logRho':
            fit_variable_function = self.compute_fit_coefficients_for_logRho
        elif self._variable_to_fit == 'logX':
            fit_variable_function = self.compute_fit_coefficients_for_logX
        elif self._variable_to_fit == 'X':
            fit_variable_function = self.compute_fit_coefficients_for_X
        else:
            raise ValueError(
                "Unknown variable to fit: {}".format(self._variable_to_fit))

        self._create_output_files()

        coeffs = []
        params = []
        shoot = []

        for row in range(number_of_rows):
            sys.stdout.write("{}/{}\n".format(row + 1, number_of_rows))
            # params_tmp = params_func().copy()

            try:
                coeffs_tmp, shoot_tmp = fit_variable_function(params_func())
                coeffs.append(coeffs_tmp)
                params.append(self._params.copy())
                shoot.append(shoot_tmp)
                # Easily generalizable: a list of desired derived parameters
                # could be passed in and the whole dictionary stored.
            except Exception as e:
                sys.stderr.write(str(self._params) + '\n')
                sys.stderr.write(str(e))
                sys.stderr.write('\n')
                continue

            if len(coeffs) == 5:
                self._save_computed(params, shoot, coeffs)

                params = []
                coeffs = []
                shoot = []

        self._save_computed(params, shoot, coeffs)

    def compute_bins_from_file(self, path):
        """
        Compute the w_i bins for the models given in path.
        """
        if self._computed is True:
            print(
                "Bins already computed. Use reset if you want to compute them again"
            )
            return

        self._path = path

        self._read_from_file(path)

        def params_gen(length):
            row = 0
            while row < length:
                yield self._params_from_row(row)
                row += 1

        params = params_gen(len(self.params_smg))

        self.compute_bins_from_params(params.__next__, len(self.params_smg))

    def _create_output_files(self):
        """
        Initialize the output files.
        """
        # TODO: Add check if files exist
        with open(self._fparamsname, 'a') as f:
            f.write('# ' + "Dictionary of params to use with cosmo.set()" +
                    '\n')

        with open(self._fshootname, 'a') as f:
            f.write('# ' + "Shooting variable value" + '\n')

        if self._binType == 'bins':
            with open(self._fwzname, 'a') as f:
                f.write('# ' + "Bins on redshift" + '\n')
                f.write('# ' + str(self._zbins).strip('[]').replace('\n', '') +
                        '\n')

            with open(self._fwaname, 'a') as f:
                f.write('# ' + "Bins on scale factor" + '\n')
                f.write('# ' + str(self._abins).strip('[]').replace('\n', '') +
                        '\n')
        elif self._binType == 'Pade':
            with open(self._fPadename, 'a') as f:
                f.write('# ' + "Pade fit for temporal variable {} \n".format(
                    self._Pade_xvar))
                coeff_header_num = [
                    'num_{}'.format(n) for n in range(self._PadeOrder[0] + 1)
                ]
                coeff_header_den = [
                    'den_{}'.format(n + 1) for n in range(self._PadeOrder[1])
                ]
                res_header = ['min(residual)', 'max(residual)']
                f.write('# ' + ' '.join(coeff_header_num + coeff_header_den +
                                        res_header) + '\n')
        elif self._binType == 'fit':
            with open(self._fFitname, 'a') as f:
                f.write('# ' +
                        "{} fit for temporal variable {} of {}\n".format(
                            self._fit_function_label, self._fit_xvar,
                            self._variable_to_fit))
                coeff_header_num = [
                    'c_{}'.format(n) for n in range(self._n_coeffs)
                ]
                res_header = ['max(rel.dev. D_A)', 'max(rel.dev. f)']
                f.write('# ' + ' '.join(coeff_header_num + res_header) + '\n')

    def _save_computed(self, params, shoot, wbins):
        """
        Save stored iterations in file.
        """
        with open(self._fparamsname, 'a') as f:
            for i in params:
                f.write(str(i) + '\n')

        with open(self._fshootname, 'a') as f:
            np.savetxt(f, shoot)

        if self._binType == 'bins':
            wzbins, wabins = wbins
            with open(self._fwzname, 'a') as f:
                np.savetxt(f, wzbins)

            with open(self._fwaname, 'a') as f:
                np.savetxt(f, wabins)
        elif self._binType == 'Pade':
            with open(self._fPadename, 'a') as f:
                np.savetxt(f, wbins)
        elif self._binType == 'fit':
            with open(self._fFitname, 'a') as f:
                np.savetxt(f, wbins)

    def reset(self):
        """
        Reset class
        """
        self._cosmo.struct_cleanup()
        self._cosmo.empty()
        self._set_default_values()
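The fit_pade helper called in compute_Pade_coefficients above is not shown in this example. A minimal sketch of what such a [m/n] Pade fit could look like with scipy.optimize.curve_fit, assuming the denominator's constant term is fixed to 1 so the free coefficients are num_0..num_m followed by den_1..den_n (the ordering used above), is:

import numpy as np
from scipy.optimize import curve_fit

def fit_pade_sketch(X, w, m, n, maxfev=10000):
    # Hypothetical stand-in for fit_pade: least-squares fit of a [m/n] Pade
    # approximant w(X) ~ (num_0 + ... + num_m X^m) / (1 + den_1 X + ... + den_n X^n).
    def model(x, *coeffs):
        coeffs = np.asarray(coeffs)
        num = np.polyval(coeffs[:m + 1][::-1], x)
        den = 1. + np.polyval(np.append(coeffs[m + 1:][::-1], 0.), x)
        return num / den

    p0 = np.ones(m + n + 1)
    coeffs, _ = curve_fit(model, X, w, p0=p0, maxfev=maxfev)
    return coeffs, model(X, *coeffs)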
Example #12
0
Theta0 = 0.25 * one_k['delta_g']
phi = one_k['phi']
psi = one_k['psi']
theta_b = one_k['theta_b']
a = one_k['a']
# compute related quantities
R = 3. / 4. * M.Omega_b() / M.Omega_g() * a  # R = 3/4 * (rho_b/rho_gamma)
zero_point = -(1. + R) * psi  # zero point of oscillations: -(1.+R)*psi
#
# get Theta0 oscillation amplitude (for vertical scale of plot)
#
Theta0_amp = max(Theta0.max(), -Theta0.min())
#
# get the time of decoupling
#
quantities = M.get_current_derived_parameters(['tau_rec'])
# print(quantities.keys())
tau_rec = quantities['tau_rec']
#
# use table of background quantities to find the time of
# Hubble crossing (k / (aH) = 2 pi), sound horizon crossing (k * rs = 2 pi)
#
background = M.get_background()  # load background table
# print(background.keys())
#
background_tau = background['conf. time [Mpc]']  # read conformal times in background table
background_z = background['z']  # read redshift
background_k_over_aH = k / background['H [1/Mpc]'] * (1. + background['z'])  # read k/aH = k(1+z)/H
background_k_rs = k * background['comov.snd.hrz.']  # read k * rs
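The snippet stops after building the k/(aH) and k*rs columns. A hedged continuation (not part of the original code) showing how the two crossing times could then be extracted, assuming both columns grow monotonically with conformal time up to the crossing and that the crossing actually occurs inside the table, is:

# Hypothetical continuation: locate tau where k/(aH) = 2*pi and k*rs = 2*pi,
# reusing the background_* arrays built above.
i_H = np.argmax(background_k_over_aH > 2. * np.pi)   # first index past Hubble crossing
i_s = np.argmax(background_k_rs > 2. * np.pi)        # first index past sound-horizon crossing
tau_Hubble_crossing = np.interp(2. * np.pi,
                                background_k_over_aH[:i_H + 1],
                                background_tau[:i_H + 1])
tau_rs_crossing = np.interp(2. * np.pi,
                            background_k_rs[:i_s + 1],
                            background_tau[:i_s + 1])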
Example #13
0
class tsz_gal_cl:
    def __init__(self):
        # print 'Class for tSZ Cl'
        # self.ptilde = np.loadtxt(LIBDIR+'/aux_files/ptilde.txt')
        self.fort_lib_cl = cdll.LoadLibrary(LIBDIR + "/source/calc_cl")

        self.fort_lib_cl.calc_cl_.argtypes = [
            POINTER(c_double),  #h0
            POINTER(c_double),  #obh2
            POINTER(c_double),  #och2
            POINTER(c_double),  #mnu
            POINTER(c_double),  #bias
            POINTER(c_double),  #Mcut
            POINTER(c_double),  #M1
            POINTER(c_double),  #kappa
            POINTER(c_double),  #sigma_Ncen
            POINTER(c_double),  #alp_Nsat
            POINTER(c_double),  #rmax
            POINTER(c_double),  #rgs
            POINTER(c_int64),  #pk_nk
            POINTER(c_int64),  #pk_nz
            np.ctypeslib.ndpointer(dtype=np.double),  #karr
            np.ctypeslib.ndpointer(dtype=np.double),  #pkarr
            np.ctypeslib.ndpointer(dtype=np.double),  #dndz
            POINTER(c_int64),  #nz_dndz
            POINTER(c_double),  #z1
            POINTER(c_double),  #z2
            POINTER(c_double),  #z1_ng
            POINTER(c_double),  #z2_ng
            POINTER(c_int64),  #nl
            np.ctypeslib.ndpointer(dtype=np.double),  #ell
            np.ctypeslib.ndpointer(dtype=np.double),  #gg
            np.ctypeslib.ndpointer(dtype=np.double),  #gy
            np.ctypeslib.ndpointer(dtype=np.double),  #tll
            POINTER(c_double),  #ng(z1<z<z2)
            POINTER(c_int64),  #flag_nu
            POINTER(c_int64),  #flag_tll
            POINTER(c_int64),  #nm
            POINTER(c_int64)  #nz
        ]
        self.fort_lib_cl.calc_cl_.restype = c_void_p

        # Calculation setup
        self.kmin = 1e-3
        self.kmax = 5.
        self.zmax = 4.  # should be consistent with fortran code
        self.nk_pk = 200
        self.nz_pk = 51

        # Class
        self.cosmo = Class()

    def get_tsz_cl(self, ell_arr, params, dndz, z1, z2, z1_ng, z2_ng, nm, nz):
        self.zmin = z1
        self.zmax = z2
        obh2 = params['obh2']
        och2 = params['och2']
        As = params['As']
        ns = params['ns']
        mnu = params['mnu']
        mass_bias = params['mass_bias']
        Mcut = params['Mcut']
        M1 = params['M1']
        kappa = params['kappa']
        sigma_Ncen = params['sigma_Ncen']
        alp_Nsat = params['alp_Nsat']
        rmax = params['rmax']
        rgs = params['rgs']
        flag_nu_logic = params['flag_nu']
        flag_tll_logic = params['flag_tll']
        if not isinstance(flag_nu_logic, bool):
            print('flag_nu must be boolean.')
            sys.exit()
        flag_nu = 1 if flag_nu_logic else 0
        if not isinstance(flag_tll_logic, bool):
            print('flag_tll must be boolean.')
            sys.exit()
        flag_tll = 1 if flag_tll_logic else 0

        if 'theta' in params.keys():
            theta = params['theta']
            pars = {'output': 'mPk', '100*theta_s': theta,
                    'omega_b': obh2, 'omega_cdm': och2,
                    'A_s': As, 'n_s': ns,
                    'N_ur': 0.00641, 'N_ncdm': 1, 'm_ncdm': mnu / 3.,
                    'T_ncdm': 0.71611,
                    'P_k_max_h/Mpc': self.kmax, 'z_max_pk': self.zmax,
                    'deg_ncdm': 3.}
            self.cosmo.set(pars)
            self.cosmo.compute()
            h0 = self.cosmo.h()
        elif 'h0' in params.keys():
            h0 = params['h0']
            pars = {'output': 'mPk', 'h': h0,
                    'omega_b': obh2, 'omega_cdm': och2,
                    'A_s': As, 'n_s': ns,
                    'N_ur': 0.00641, 'N_ncdm': 1, 'm_ncdm': mnu / 3.,
                    'T_ncdm': 0.71611,
                    'P_k_max_h/Mpc': self.kmax, 'z_max_pk': self.zmax,
                    'deg_ncdm': 3.}
            self.cosmo.set(pars)
            self.cosmo.compute()
        derived = self.cosmo.get_current_derived_parameters(
            ['100*theta_s', 'sigma8'])
        vz = (self.cosmo.angular_distance(z2_ng)**3*(1+z2_ng)**3 \
              -self.cosmo.angular_distance(z1_ng)**3*(1+z1_ng)**3)
        vz = vz * h0**3 * 4. * np.pi / 3.
        derived['vz'] = vz

        # get matter power spectra
        kh_arr = np.logspace(np.log10(self.kmin), np.log10(self.kmax),
                             self.nk_pk)
        kh = np.zeros((self.nz_pk, self.nk_pk))
        pk = np.zeros((self.nz_pk, self.nk_pk))
        pk_zarr = np.linspace(self.zmin, self.zmax, self.nz_pk)
        for i in range(self.nz_pk):
            kh[i, :] = kh_arr
            if flag_nu == 0:
                pk[i, :] = np.array([
                    self.cosmo.pk(k * h0, pk_zarr[i]) * h0**3 for k in kh_arr
                ])
            elif flag_nu == 1:
                pk[i, :] = np.array([
                    self.cosmo.pk_cb(k * h0, pk_zarr[i]) * h0**3
                    for k in kh_arr
                ])

        # params
        h0_in = byref(c_double(h0))
        obh2_in = byref(c_double(obh2))
        och2_in = byref(c_double(och2))
        mnu_in = byref(c_double(mnu))
        mass_bias_in = byref(c_double(mass_bias))
        Mcut_in = byref(c_double(Mcut))
        M1_in = byref(c_double(M1))
        kappa_in = byref(c_double(kappa))
        sigma_Ncen_in = byref(c_double(sigma_Ncen))
        alp_Nsat_in = byref(c_double(alp_Nsat))
        rmax_in = byref(c_double(rmax))
        rgs_in = byref(c_double(rgs))
        flag_nu_in = byref(c_int64(flag_nu))
        flag_tll_in = byref(c_int64(flag_tll))

        # dNdz
        nz_dndz = byref(c_int64(len(dndz)))

        # integration setting
        z1_in = byref(c_double(self.zmin))
        z2_in = byref(c_double(self.zmax))

        # outputs
        nl = len(ell_arr)
        cl_gg = np.zeros((2, nl))
        cl_gy = np.zeros((2, nl))
        tll = np.zeros((nl * 2, nl * 2))
        ng = c_double(0.0)
        nl = c_int64(nl)

        self.fort_lib_cl.calc_cl_(
                h0_in, obh2_in, och2_in, mnu_in,\
                mass_bias_in, \
                Mcut_in, M1_in, kappa_in, sigma_Ncen_in, alp_Nsat_in,\
                rmax_in, rgs_in,\
                byref(c_int64(self.nk_pk)), byref(c_int64(self.nz_pk)),\
                np.array(kh),np.array(pk),\
                np.array(dndz),nz_dndz,\
                z1_in, z2_in,\
                byref(c_double(z1_ng)),byref(c_double(z2_ng)),\
                nl,np.array(ell_arr),\
                cl_gg,cl_gy,tll,ng,\
                flag_nu_in,flag_tll_in,\
                c_int64(nm), c_int64(nz)
                )

        self.cosmo.struct_cleanup()
        return cl_gg, cl_gy, tll, ng.value, derived
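A purely illustrative usage sketch for the class above: every numerical value is a placeholder, the compiled Fortran library under LIBDIR is required, and the 1-D layout assumed for dndz is a guess, since the exact layout expected by the Fortran code is not visible here.

import numpy as np

# Hypothetical call; values are placeholders, not ones used in the original code.
tsz = tsz_gal_cl()
ell = np.linspace(100., 3000., 30)
zs = np.linspace(0.01, 2., 100)
dndz = np.exp(-0.5 * ((zs - 0.5) / 0.2) ** 2)        # toy dN/dz (layout assumed 1-D)
params = {'obh2': 0.0224, 'och2': 0.120, 'As': 2.1e-9, 'ns': 0.965, 'mnu': 0.06,
          'mass_bias': 1.3, 'Mcut': 1e13, 'M1': 1e14, 'kappa': 1.0,
          'sigma_Ncen': 0.5, 'alp_Nsat': 1.0, 'rmax': 4.0, 'rgs': 1.0,
          'h0': 0.674, 'flag_nu': False, 'flag_tll': False}
cl_gg, cl_gy, tll, ng, derived = tsz.get_tsz_cl(ell, params, dndz,
                                                z1=0.1, z2=1.0,
                                                z1_ng=0.1, z2_ng=0.9,
                                                nm=20, nz=20)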
Example #14
0
class classy(SlikPlugin):
    """
    Plugin for CLASS.

    Credit: Brent Follin, Teresa Hamill
    """

    #{cosmoslik name : class name}
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot'}


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 tau,
                 omnuh2, #0.006
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 massive_neutrinos=1,
                 massless_neutrinos=2.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.002,
                 outputs=[],
                 **kwargs):


        
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       **{self.name_mapping[k]:v for k,v in locals().items() 
                          if k in self.name_mapping and v is not None})
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #15
0
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    # {cosmoslik name : class name} - needed even for variables whose CLASS name
    # is identical, because the dict comprehension passed to self.model.set only
    # keeps keys that appear in name_mapping.
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'k_c':'k_c',
                    'alpha_exp':'alpha_exp',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    #'Tcmb':'T_cmb',
                    #'P_k_max_hinvMpc':'P_k_max_h/Mpc'
                    #'w':'w0_fld',
                    #'nrun':'alpha_s',
                    #'omk':'Omega_k',
                    #'l_max_scalar':'l_max_scalars',
                    #'l_max_tensor':'l_max_tensors'
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 k_c,
                 alpha_exp,
                 tau,
                 #omnuh2=0, #0.006  #None means that Class will take the default for this, maybe?
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 #massive_neutrinos=0,
                 massless_neutrinos=3.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.05,
                 outputs=[],
                 **kwargs):


        
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       **{self.name_mapping[k]:v for k,v in locals().items() 
                          if k in self.name_mapping and v is not None})
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
Example #16
0
class classy(SlikPlugin):
    """
    Compute the CMB power spectrum with CLASS.

    Based on work by: Brent Follin, Teresa Hamill
    """

    #{cosmoslik name : class name}
    name_mapping = {
        'As': 'A_s',
        'lmax': 'l_max_scalars',
        'mnu': 'm_ncdm',
        'Neff': 'N_ncdm',
        'ns': 'n_s',
        'nt': 'n_t',
        'ombh2': 'omega_b',
        'omch2': 'omega_cdm',
        'omk': 'Omega_k',
        'pivot_scalar': 'k_pivot',
        'r': 'r',
        'tau': 'tau_reio',
        'Tcmb': 'T_cmb',
        'Yp': 'YHe',
    }

    def __init__(self, **defaults):
        super().__init__()
        from classy import Class
        self.model = Class()
        self.defaults = defaults

    def convert_params(self, **params):
        """
        Convert from CosmoSlik params to CLASS
        """
        params = {self.name_mapping.get(k, k): v for k, v in params.items()}
        if 'theta' in params:
            params['100*theta_s'] = 100 * params.pop('theta')
        params['lensing'] = 'yes' if params.pop('DoLensing', True) else 'no'
        return params

    def __call__(self,
                 As=None,
                 DoLensing=True,
                 H0=None,
                 lmax=None,
                 mnu=None,
                 Neff=None,
                 nrun=None,
                 ns=None,
                 ombh2=None,
                 omch2=None,
                 omk=None,
                 output='tCl, lCl, pCl',
                 pivot_scalar=None,
                 r=None,
                 tau=None,
                 Tcmb=2.7255,
                 theta=None,
                 w=None,
                 Yp=None,
                 nowarn=False,
                 **kwargs):

        if not nowarn and kwargs:
            print('Warning: passing unknown parameters to CLASS: ' +
                  str(kwargs) + ' (set nowarn=True to turn off this message.)')

        params = dict(
            self.defaults, **{
                k: v
                for k, v in arguments(include_kwargs=False,
                                      exclude=["nowarn"]).items()
                if v is not None
            })
        self.model.set(self.convert_params(**params))
        self.model.compute()

        lmax = params['lmax']
        ell = arange(lmax + 1)
        if params['DoLensing'] == True:
            self.cmb_result = {
                x: (self.model.lensed_cl(lmax)[x.lower()]) * Tcmb**2 * 1e12 *
                ell * (ell + 1) / 2 / pi
                for x in ['TT', 'TE', 'EE', 'BB', 'PP', 'TP']
            }
        else:
            self.cmb_result = {
                x: (self.model.raw_cl(lmax)[x.lower()]) * Tcmb**2 * 1e12 *
                ell * (ell + 1) / 2 / pi
                for x in ['TT']
            }

        self.model.struct_cleanup()
        self.model.empty()

        return self.cmb_result

    def get_bao_observables(self, z):
        return {
            'H':
            self.model.Hubble(z),
            'D_A':
            self.model.angular_distance(z),
            'c':
            1.0,
            'r_d':
            (self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']
        }
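A short, hedged illustration of convert_params for this plugin, assuming cosmoslik and classy are importable; the values are placeholders, and the printed key order and float formatting may differ slightly.

# Hypothetical illustration of the CosmoSlik -> CLASS name mapping above.
p = classy()
print(p.convert_params(ombh2=0.0224, omch2=0.120, theta=0.010411,
                       ns=0.965, tau=0.055, lmax=3000, DoLensing=True))
# Roughly: {'omega_b': 0.0224, 'omega_cdm': 0.12, 'n_s': 0.965,
#           'tau_reio': 0.055, 'l_max_scalars': 3000,
#           '100*theta_s': 1.0411, 'lensing': 'yes'}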
Example #17
0
class TestClass(unittest.TestCase):
    """
    Testing Class and its wrapper classy on different cosmologies

    To run it, do
    ~] nosetests test_class.py

    It will run Class many times, on different cosmological scenarios, each
    time testing different output possibilities (none asked, only mPk, etc.).

    """
    def setUp(self):
        """
        set up data used in the tests.
        setUp is called before each test function execution.
        """
        self.cosmo = Class()

        self.verbose = {
            'input_verbose': 1,
            'background_verbose': 1,
            'thermodynamics_verbose': 1,
            'perturbations_verbose': 1,
            'transfer_verbose': 1,
            'primordial_verbose': 1,
            'spectra_verbose': 1,
            'nonlinear_verbose': 1,
            'lensing_verbose': 1,
            'output_verbose': 1
        }
        self.scenario = {'lensing': 'yes'}

    def tearDown(self):
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        del self.scenario

    @parameterized.expand(
        itertools.product((
            'LCDM',
            'Mnu',
            'Positive_Omega_k',
            'Negative_Omega_k',
            'Isocurvature_modes',
        ), ({
            'output': ''
        }, {
            'output': 'mPk'
        }, {
            'output': 'tCl'
        }, {
            'output': 'tCl pCl lCl'
        }, {
            'output': 'mPk tCl lCl',
            'P_k_max_h/Mpc': 10
        }, {
            'output': 'nCl sCl'
        }, {
            'output': 'tCl pCl lCl nCl sCl'
        }), ({
            'gauge': 'newtonian'
        }, {
            'gauge': 'sync'
        }), ({}, {
            'non linear': 'halofit'
        })))
    def test_wrapper_implementation(self, name, scenario, gauge, nonlinear):
        """Create a few instances based on different cosmologies"""
        if name == 'Mnu':
            self.scenario.update({'N_ncdm': 1, 'm_ncdm': 0.06})
        elif name == 'Positive_Omega_k':
            self.scenario.update({'Omega_k': 0.01})
        elif name == 'Negative_Omega_k':
            self.scenario.update({'Omega_k': -0.01})
        elif name == 'Isocurvature_modes':
            self.scenario.update({'ic': 'ad,nid,cdi', 'c_ad_cdi': -0.5})

        self.scenario.update(scenario)
        if scenario != {}:
            self.scenario.update(gauge)
        self.scenario.update(nonlinear)

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test case %s |\n' % name)
        sys.stderr.write('---------------------------------\n')
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        setting = self.cosmo.set(
            dict(self.verbose, **self.scenario))
        self.assertTrue(setting, "Class failed to initialize with input dict")

        cl_list = ['tCl', 'lCl', 'pCl', 'nCl', 'sCl']

        # Depending on the cases, the compute should fail or not
        should_fail = True
        output = self.scenario['output'].split()
        for elem in output:
            if elem in ['tCl', 'pCl']:
                for elem2 in output:
                    if elem2 == 'lCl':
                        should_fail = False
                        break

        if not should_fail:
            self.cosmo.compute()
        else:
            self.assertRaises(CosmoSevereError, self.cosmo.compute)
            return

        self.assertTrue(self.cosmo.state,
                        "Class failed to go through all __init__ methods")
        if self.cosmo.state:
            print('--> Class is ready')
        # Depending
        if 'output' in self.scenario.keys():
            # Positive tests
            output = self.scenario['output']
            for elem in output.split():
                if elem in cl_list:
                    print('--> testing raw_cl function')
                    cl = self.cosmo.raw_cl(100)
                    self.assertIsNotNone(cl, "raw_cl returned nothing")
                    self.assertEqual(
                        np.shape(cl['tt'])[0], 101,
                        "raw_cl returned wrong size")
                if elem == 'mPk':
                    print('--> testing pk function')
                    pk = self.cosmo.pk(0.1, 0)
                    self.assertIsNotNone(pk, "pk returned nothing")
            # Negative tests of output functions
            if not any([elem in cl_list for elem in output.split()]):
                print('--> testing absence of any Cl')
                self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
            if 'mPk' not in self.scenario['output'].split():
                print('--> testing absence of mPk')
                #args = (0.1, 0)
                self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)

    @parameterized.expand(
        itertools.product(('massless', 'massive', 'both'),
                          ('photons', 'massless', 'exact'), ('t', 's, t')))
    def test_tensors(self, scenario, method, modes):
        """Test the new tensor mode implementation"""
        self.scenario = {}
        if scenario == 'massless':
            self.scenario.update({'N_eff': 3.046, 'N_ncdm': 0})
        elif scenario == 'massive':
            self.scenario.update({
                'N_eff': 0,
                'N_ncdm': 2,
                'm_ncdm': '0.03, 0.04',
                'deg_ncdm': '2, 1'
            })
        elif scenario == 'both':
            self.scenario.update({
                'N_eff': 1.5,
                'N_ncdm': 2,
                'm_ncdm': '0.03, 0.04',
                'deg_ncdm': '1, 0.5'
            })

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test case: %s %s %s |\n' %
                         (scenario, method, modes))
        sys.stderr.write('---------------------------------\n')
        self.scenario.update({
            'tensor method': method,
            'modes': modes,
            'output': 'tCl, pCl'
        })
        for key, value in self.scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")
        self.cosmo.set(dict(self.verbose, **self.scenario))
        self.cosmo.compute()

    @parameterized.expand(
        zip(
            powerset(['100*theta_s', 'Omega_dcdmdr']),
            powerset([1.04, 0.20]),
        ))
    def test_shooting_method(self, variables, values):
        Omega_cdm = 0.25

        scenario = {
            'Omega_b': 0.05,
        }

        for variable, value in zip(variables, values):
            scenario.update({variable: value})

        if 'Omega_dcdmdr' in variables:
            scenario.update({
                'Gamma_dcdm': 100,
                'Omega_cdm': Omega_cdm - scenario['Omega_dcdmdr']
            })
        else:
            scenario.update({'Omega_cdm': Omega_cdm})

        sys.stderr.write('\n\n---------------------------------\n')
        sys.stderr.write('| Test shooting: %s |\n' % (', '.join(variables)))
        sys.stderr.write('---------------------------------\n')
        for key, value in scenario.items():
            sys.stderr.write("%s = %s\n" % (key, value))
        sys.stderr.write("\n")

        scenario.update(self.verbose)
        self.assertTrue(self.cosmo.set(scenario),
                        "Class failed to initialise with this input")
        self.cosmo.compute()

        # Now, check that the values are properly extracted
        for variable, value in zip(variables, values):
            if variable == '100*theta_s':
                computed_value = self.cosmo.get_current_derived_parameters(
                    [variable])[variable]
                self.assertAlmostEqual(value, computed_value, places=5)
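The powerset helper used in test_shooting_method above is not defined in this snippet; the standard itertools recipe would be a natural implementation (this is an assumption about the original code):

from itertools import chain, combinations

def powerset(iterable):
    # powerset([1, 2, 3]) --> () (1,) (2,) (3,) (1, 2) (1, 3) (2, 3) (1, 2, 3)
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))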
Example #18
0
class Model():
    def __init__(self, cosmo=None):
        """
        Initialize the Model class. By default Model uses its own Class
        instance.

        cosmo = external Class instance. Default is None
        """
        if cosmo:
            self.cosmo = cosmo
        else:
            self.cosmo = Class()
        self.computed = {}
        self.texnames = {}

    def __set_scale(self, axes, xscale, yscale):
        """
        Set scales for axes in axes array.

        axes = axes array (e.g. f, ax = plt.subplots(2,2))
        xscale = linear array of xscale.
        yscale = linear array of yscale.

        Scales are set after the axes array is flattened. Each plot is counted
        from left to right and from top to bottom.
        """
        for i, ax in enumerate(axes.flat):
            ax.set_xscale(xscale[i])
            ax.set_yscale(yscale[i])

    def __set_label(self, axes, xlabel, ylabel):
        """
        Set labels for axes in axes array.

        axes = axes array (e.g. f, ax = plt.subplots(2,2))
        xlabel = linear array of xlabels.
        ylabel = linear array of ylabels.

        Labels are set after the axes array is flattened. Each plot is counted
        from left to right and from top to bottom.
        """
        for i, ax in enumerate(axes.flat):
            ax.set_xlabel(xlabel[i])
            ax.set_ylabel(ylabel[i])

    def __store_cl(self, cl_dic):
        """
        Store cl's as (l*(l+1)/2pi)*cl, which is much more useful.
        """

        ell = cl_dic['ell'][2:]

        for cl, list_val in cl_dic.items():
            list_val = list_val[2:]
            if (list_val == ell).all():
                cl_dic[cl] = list_val
                continue
            list_val = (ell * (ell + 1) / (2 * np.pi)) * list_val
            cl_dic[cl] = list_val  # Remove first two null items (l=0,1)

        return cl_dic

    def add_derived(self, varied_name, keys, value):
        """
        Add a derived parameter for varied_name dictionary.

        varied_name = varied variable's name.
        keys = list of keys in descending level.
        value = value to store for new dictionary key.
        """

        dic = self.computed[varied_name]

        for key in keys:
            if key not in dic:
                dic[key] = {}

            dic = dic[key]

        dic.update(value)

    def compute_models(self, params, varied_name, index_variable, values,
                       back=[], thermo=[], prim=[], pert=[], trans=[],
                       pk=[0.0001, 0.1, 100], extra=[], update=True,
                       cosmo_msg=False, texname=""):
        """
        Fill dic with the hi_class output structures for the model with given
        params, modifying the varied_name value with values.

        params = parameters to be set in Class. They must be in agreement with
                what is asked for.
        varied_name = the name of the variable you are modifying. It will be
                      used as key in dic assigned to its background structures.
        index_variable = variable's index in parameters_smg array.
        values = varied variable values you want to compute the cosmology for.
        back = list of variables to store from background. If 'all', store the
              whole dictionary.
        thermo = list of variables to store from thermodynamics. If 'all',
                  store the whole dictionary.
        prim = list of variables to store from primordial. If 'all', store the
               whole dictionary.
        pert = list of variables to store from perturbations. If 'all', store
               the whole dictionary.
        trans = list of variables to store from transfer. If 'all', store
                the whole dictionary. get_transfer accepts two optional
                arguments: z=0 and output_format='class' (available options are
                'class' or 'camb'). If different values are desired, first
                item of trans must be {'z': value, 'output_format': value}.
        pk = list with the minimum and maximum k values to store the present
             matter power spectrum and the number of points [k_min, k_max,
             number_points]. Default [10^-4, 10^1, 100].
        extra = list of any of the method or objects defined in cosmo, e.g.
                w0_smg().  It will store {'method': cosmo.w0_smg()}
        update = if True, update the old computed[key] dictionary; otherwise
                 create a new one. Default: True.
        cosmo_msg = if True, print cosmo.compute() messages. Default: False.
        """

        key = varied_name

        if texname:
            self.set_texnames({varied_name: texname})
        elif key not in self.texnames:  # texname will not be set at this stage. No check required
            self.set_texnames({varied_name: varied_name})

        if (not update) or (key not in self.computed.keys()):
            self.computed[key] = od()

        for val in values:
            # key = "{}={}".format(varied_name, val)
            params["parameters_smg"] = inip.vary_params(params["parameters_smg"], [[index_variable, val]])

            # This could be placed after the try block so that empty
            # dictionaries are not stored. Nevertheless, keeping them is useful
            # to track the failed models and, perhaps, to implement a method to
            # recover them with Omega_smg_debug.
            d = self.computed[key][val] = {}

            self.cosmo.empty()
            self.cosmo.set(params)

            try:
                self.cosmo.compute()
            except Exception as e:
                print("Error: skipping {}={}".format(key, val))
                if cosmo_msg:
                    print(e)

                continue

            d['tunned'] = self.cosmo.get_current_derived_parameters(['tuning_parameter'])['tuning_parameter']

            for lst in [[back, 'back', self.cosmo.get_background],
                        [thermo, 'thermo', self.cosmo.get_thermodynamics],
                        [prim, 'prim', self.cosmo.get_primordial]]:
                if lst[0]:
                    output = lst[2]()
                    if lst[0][0] == 'all':
                        d[lst[1]] = output
                    else:
                        d[lst[1]] = {}
                        for item in lst[0]:
                            if type(item) is list:
                                d[lst[1]].update({item[0]: output[item[0]][item[1]]})
                            else:
                                d[lst[1]].update({item: output[item]})

            if pert:
                # Perturbation is tricky because it can accept two optional
                # argument for get_perturbations and this method returns a
                # dictionary {'kind_of_pert': [{variable: list_values}]}, where
                # each item in the list is for a k (chosen in params).
                if type(pert[0]) is dict:
                    output = self.cosmo.get_perturbations(pert[0]['z'], pert[0]['output_format'])
                    if pert[1] == 'all':
                        d['pert'] = output
                else:
                    output = self.cosmo.get_perturbations()
                    if pert[0] == 'all':
                        d['pert'] = output

                if (type(pert[0]) is not dict) and (pert[0] != 'all'):
                    d['pert'] = {}
                    for subkey, lst in output.items():
                        d['pert'].update({subkey: []})
                        for n, kdic in enumerate(lst):  # Each item is for a k
                            d['pert'][subkey].append({})
                            for item in pert:
                                if type(item) is list:
                                    d['pert'][subkey][n].update({item[0]: kdic[item[0]][item[1]]})
                                else:
                                    d['pert'][subkey][n].update({item: kdic[item]})

            for i in extra:
                exec('d[i] = self.cosmo.{}'.format(i))

            try:
                d['cl'] = self.__store_cl(self.cosmo.raw_cl())
            except CosmoSevereError:
                pass

            try:
                d['lcl'] = self.__store_cl(self.cosmo.lensed_cl())
            except CosmoSevereError:
                pass

            try:
                d['dcl'] = self.cosmo.density_cl()
            except CosmoSevereError:
                pass


            if ("output" in self.cosmo.pars) and ('mPk' in self.cosmo.pars['output']):
                k_array = np.linspace(*pk)
                pk_array = np.array([self.cosmo.pk(k, 0) for k in k_array])

                d['pk'] = {'k': k_array, 'pk': pk_array}

            self.cosmo.struct_cleanup()
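A small, hedged illustration of how results are organized in Model.computed and extended with add_derived. The stored model below is a stand-in, not the output of an actual CLASS run, and constructing Model() requires the classy wrapper to be importable.

# Hypothetical illustration only; a real run would fill computed via compute_models.
m = Model()                       # uses its own Class() instance
m.computed['c_B'] = {1.0: {}}     # stand-in for one computed model, keyed by value
m.add_derived('c_B', keys=[1.0, 'derived'], value={'w0': -0.95})
assert m.computed['c_B'][1.0]['derived']['w0'] == -0.95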
Example #19
0
class classy(SlikPlugin):
    """
    Plugin for CLASS.
    Credit: Brent Follin, Teresa Hamill, Andy Scacco
    """

    # {cosmoslik name : class name} - needed even for variables whose CLASS name
    # is identical, because the dict comprehension passed to self.model.set only
    # keeps keys that appear in name_mapping.
    name_mapping = {'As':'A_s',
                    'ns':'n_s',
                    'r':'r',
                    'phi0':'custom1',
                    'm6':'custom2',
                    'nt':'n_t',
                    'ombh2':'omega_b',
                    'omch2':'omega_cdm',
                    'omnuh2':'omega_ncdm',
                    'tau':'tau_reio',
                    'H0':'H0',
                    'massive_neutrinos':'N_ncdm',
                    'massless_neutrinos':'N_ur',
                    'Yp':'YHe',
                    'pivot_scalar':'k_pivot',
                    }


    def __init__(self):
        super(classy,self).__init__()

        try:
            from classy import Class
        except ImportError:
            raise Exception("Failed to import CLASS python wrapper 'Classy'.")

        self.model = Class()


    def __call__(self,
                 ombh2,
                 omch2,
                 H0,
                 As,
                 ns,
                 phi0,
                 m6,
                 tau,
                 w=None,
                 r=None,
                 nrun=None,
                 omk=0,
                 Yp=None,
                 Tcmb=2.7255,
                 massless_neutrinos=3.046,
                 l_max_scalar=3000,
                 l_max_tensor=3000,
                 pivot_scalar=0.05,
                 outputs=[],
                 **kwargs):

        d = {self.name_mapping[k]: v for k, v in locals().items()
             if k in self.name_mapping and v is not None}
        d['P_k_ini type']='external_Pk'
        d['modes'] = 's,t'
        self.model.set(output='tCl, lCl, pCl',
                       lensing='yes',
                       l_max_scalars=l_max_scalar,
                       command = '../LSODAtesnors/pk',
                       **d)
        self.model.compute()

        ell = arange(l_max_scalar+1)
        self.cmb_result = {'cl_%s'%x:(self.model.lensed_cl(l_max_scalar)[x.lower()])*Tcmb**2*1e12*ell*(ell+1)/2/pi
                           for x in ['TT','TE','EE','BB','PP','TP']}

        self.model.struct_cleanup()
        self.model.empty()
        
        return self.cmb_result

    def get_bao_observables(self, z):
        return {'H':self.model.Hubble(z),
                'D_A':self.model.angular_distance(z),
                'c':1.0,
                'r_d':(self.model.get_current_derived_parameters(['rs_rec']))['rs_rec']}
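The '../LSODAtesnors/pk' command above is an external primordial-spectrum generator that is not part of this snippet. As a rough sketch of the interface CLASS's external_Pk mode expects, a stand-in could look like the script below; the column convention (k, P_s(k), P_t(k), with the custom parameters passed as command-line arguments) is an assumption, and the spectra themselves are placeholders.

#!/usr/bin/env python
# Hypothetical stand-in for the external 'pk' command; not the original code.
import sys
import numpy as np

phi0, m6 = float(sys.argv[1]), float(sys.argv[2])   # custom1, custom2
ks = np.logspace(-6, 1, 500)                        # k in 1/Mpc
P_s = 2.1e-9 * (ks / 0.05) ** (0.96 - 1.)           # placeholder power laws
P_t = 0.1 * P_s
for k, ps, pt in zip(ks, P_s, P_t):
    print("%e %e %e" % (k, ps, pt))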
Example #20
0
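This example relies on several names defined elsewhere in the cobaya package and not shown here (BoltzmannBase, Collector, non_linear_default_code, NotInstalledError, LoggedError). As a hedged sketch, a Collector container consistent with how it is used below could be a simple NamedTuple:

# Assumption: this only mirrors the Collector fields actually used in this
# example; the real cobaya definition may differ.
from typing import Any, Callable, NamedTuple, Optional, Sequence

class Collector(NamedTuple):
    method: str
    args: Sequence = []
    args_names: Sequence = []
    kwargs: dict = {}
    arg_array: Any = None
    post: Optional[Callable] = None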
class classy(BoltzmannBase):
    # Name of the Class repo/folder and version to download
    _classy_repo_name = "lesgourg/class_public"
    _min_classy_version = "v2.9.3"
    _classy_repo_version = os.environ.get('CLASSY_REPO_VERSION', _min_classy_version)

    def initialize(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # Allow global import if no direct path specification
        allow_global = not self.path
        if not self.path and self.packages_path:
            self.path = self.get_path(self.packages_path)
        self.classy_module = self.is_installed(path=self.path, allow_global=allow_global)
        if not self.classy_module:
            raise NotInstalledError(
                self.log, "Could not find CLASS. Check error message above.")
        global CosmoComputationError, CosmoSevereError
        from classy import Class, CosmoSevereError, CosmoComputationError
        self.classy = Class()
        super().initialize()
        # Add general CLASS stuff
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (
                self.extra_args["sBBN file"].format(classy=self.path))
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []
        self.log.info("Initialized!")

    def must_provide(self, **requirements):
        # Computed quantities required by the likelihood
        super().must_provide(**requirements)
        for k, v in self._must_provide.items():
            # Products and other computations
            if k == "Cl":
                if any(("t" in cl.lower()) for cl in v):
                    self.extra_args["output"] += " tCl"
                if any((("e" in cl.lower()) or ("b" in cl.lower())) for cl in v):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                # For l_max_scalars, remember previous entries.
                self.extra_args["l_max_scalars"] = max(v.values())
                self.collectors[k] = Collector(
                    method="lensed_cl", kwargs={"lmax": self.extra_args["l_max_scalars"]})
                if 'T_cmb' not in self.derived_extra:
                    self.derived_extra += ['T_cmb']
            elif k == "Hubble":
                self.collectors[k] = Collector(
                    method="Hubble",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k == "angular_diameter_distance":
                self.collectors[k] = Collector(
                    method="angular_distance",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k == "comoving_radial_distance":
                self.collectors[k] = Collector(
                    method="z_of_r",
                    args_names=["z"],
                    args=[np.atleast_1d(v["z"])])
            elif isinstance(k, tuple) and k[0] == "Pk_grid":
                self.extra_args["output"] += " mPk"
                v = deepcopy(v)
                self.add_P_k_max(v.pop("k_max"), units="1/Mpc")
                # NB: Actually, only the max z is used, and the actual sampling in z
                # for computing P(k,z) is controlled by `perturb_sampling_stepsize`
                # (default: 0.1). But let's leave it like this in case this changes
                # in the future.
                self.add_z_for_matter_power(v.pop("z"))

                if v["nonlinear"] and "non linear" not in self.extra_args:
                    self.extra_args["non linear"] = non_linear_default_code
                pair = k[2:]
                if pair == ("delta_tot", "delta_tot"):
                    v["only_clustering_species"] = False
                elif pair == ("delta_nonu", "delta_nonu"):
                    v["only_clustering_species"] = True
                else:
                    raise LoggedError(self.log, "NotImplemented in CLASS: %r", pair)
                self.collectors[k] = Collector(
                    method="get_pk_and_k_and_z",
                    kwargs=v,
                    post=(lambda P, kk, z: (kk, z, np.array(P).T)))
            elif isinstance(k, tuple) and k[0] == "sigma_R":
                raise LoggedError(
                    self.log, "Classy sigma_R not implemented as yet - use CAMB only")
            elif v is None:
                k_translated = self.translate_param(k)
                if k_translated not in self.derived_extra:
                    self.derived_extra += [k_translated]
            else:
                raise LoggedError(self.log, "Requested product not known: %r", {k: v})
        # Derived parameters (if some need some additional computations)
        if any(("sigma8" in s) for s in self.output_params or requirements):
            self.extra_args["output"] += " mPk"
            self.add_P_k_max(1, units="1/Mpc")
        # Adding tensor modes if requested
        if self.extra_args.get("r") or "r" in self.input_params:
            self.extra_args["modes"] = "s,t"
        # If B spectrum with l>50, or lensing, recommend using Halofit
        cls = self._must_provide.get("Cl", {})
        has_BB_l_gt_50 = (any(("b" in cl.lower()) for cl in cls) and
                          max(cls[cl] for cl in cls if "b" in cl.lower()) > 50)
        has_lensing = any(("p" in cl.lower()) for cl in cls)
        if (has_BB_l_gt_50 or has_lensing) and not self.extra_args.get("non linear"):
            self.log.warning("Requesting BB for ell>50 or lensing Cl's: "
                             "using a non-linear code is recommended (and you are not "
                             "using any). To activate it, set "
                             "'non_linear: halofit|hmcode|...' in classy's 'extra_args'.")
        # Cleanup of products string
        self.extra_args["output"] = " ".join(set(self.extra_args["output"].split()))
        self.check_no_repeated_input_extra()

    def add_z_for_matter_power(self, z):
        if getattr(self, "z_for_matter_power", None) is None:
            self.z_for_matter_power = np.empty(0)
        self.z_for_matter_power = np.flip(np.sort(np.unique(np.concatenate(
            [self.z_for_matter_power, np.atleast_1d(z)]))), axis=0)
        self.extra_args["z_pk"] = " ".join(["%g" % zi for zi in self.z_for_matter_power])

    def add_P_k_max(self, k_max, units):
        r"""
        Unifies the treatment of :math:`k_\mathrm{max}` for the matter power
        spectrum: ``P_k_max_[1|h]/Mpc``.

        Use ``units="1/Mpc"`` or ``units="h/Mpc"``.
        """
        # Fiducial h conversion (high, though it may slow the computations)
        h_fid = 1
        if units == "h/Mpc":
            k_max *= h_fid
        # Take into account possible manual set of P_k_max_***h/Mpc*** through extra_args
        k_max_old = self.extra_args.pop(
            "P_k_max_1/Mpc", h_fid * self.extra_args.pop("P_k_max_h/Mpc", 0))
        self.extra_args["P_k_max_1/Mpc"] = max(k_max, k_max_old)

    def set(self, params_values_dict):
        # If no output requested, remove arguments that produce an error
        # (e.g. complaints if halofit requested but no Cl's computed.)
        # Needed for facilitating post-processing
        if not self.extra_args["output"]:
            for k in ["non linear"]:
                self.extra_args.pop(k, None)
        # Prepare parameters to be passed: this-iteration + extra
        args = {self.translate_param(p): v for p, v in params_values_dict.items()}
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.set(**args)

    def calculate(self, state, want_derived=True, **params_values_dict):
        # Set parameters
        self.set(params_values_dict)
        # Compute!
        try:
            self.classy.compute()
        # "Valid" failure of CLASS: parameters too extreme -> log and report
        except CosmoComputationError as e:
            if self.stop_at_error:
                self.log.error(
                    "Computation error (see traceback below)! "
                    "Parameters sent to CLASS: %r and %r.\n"
                    "To ignore this kind of error, make 'stop_at_error: False'.",
                    state["params"], dict(self.extra_args))
                raise
            else:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on. "
                               "The output of the CLASS error was %s" % e)
            return False
        # CLASS not correctly initialized, or input parameters not correct
        except CosmoSevereError:
            self.log.error("Serious error setting parameters or computing results. "
                           "The parameters passed were %r and %r. To see the original "
                           "CLASS' error traceback, make 'debug: True'.",
                           state["params"], self.extra_args)
            raise  # No LoggedError, so that CLASS traceback gets printed
        # Gather products
        for product, collector in self.collectors.items():
            # Special case: sigma8 needs H0, which cannot be known beforehand:
            if "sigma8" in self.collectors:
                self.collectors["sigma8"].args[0] = 8 / self.classy.h()
            method = getattr(self.classy, collector.method)
            arg_array = self.collectors[product].arg_array
            if arg_array is None:
                state[product] = method(
                    *self.collectors[product].args, **self.collectors[product].kwargs)
            elif isinstance(arg_array, int):
                state[product] = np.zeros(
                    len(self.collectors[product].args[arg_array]))
                for i, v in enumerate(self.collectors[product].args[arg_array]):
                    args = (list(self.collectors[product].args[:arg_array]) + [v] +
                            list(self.collectors[product].args[arg_array + 1:]))
                    state[product][i] = method(
                        *args, **self.collectors[product].kwargs)
            elif arg_array in self.collectors[product].kwargs:
                value = np.atleast_1d(self.collectors[product].kwargs[arg_array])
                state[product] = np.zeros(value.shape)
                for i, v in enumerate(value):
                    kwargs = deepcopy(self.collectors[product].kwargs)
                    kwargs[arg_array] = v
                    state[product][i] = method(
                        *self.collectors[product].args, **kwargs)
            if collector.post:
                state[product] = collector.post(*state[product])
        # Prepare derived parameters
        d, d_extra = self._get_derived_all(derived_requested=want_derived)
        if want_derived:
            state["derived"] = {p: d.get(p) for p in self.output_params}
            # Prepare necessary extra derived parameters
        state["derived_extra"] = deepcopy(d_extra)

    def _get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``compute`` method).

        Parameter names are returned in CLASS nomenclature.

        To get a parameter *from a likelihood* use `get_param` instead.
        """
        # TODO: fails with derived_requested=False
        # Put all parameters in CLASS nomenclature (self.derived_extra already is)
        requested = [self.translate_param(p) for p in (
            self.output_params if derived_requested else [])]
        requested_and_extra = dict.fromkeys(set(requested).union(set(self.derived_extra)))
        # Parameters with their own getters
        if "rs_drag" in requested_and_extra:
            requested_and_extra["rs_drag"] = self.classy.rs_drag()
        if "Omega_nu" in requested_and_extra:
            requested_and_extra["Omega_nu"] = self.classy.Omega_nu
        if "T_cmb" in requested_and_extra:
            requested_and_extra["T_cmb"] = self.classy.T_cmb()
        # Get the rest using the general derived param getter
        # No need for error control: classy.get_current_derived_parameters is passed
        # every derived parameter not excluded before, and will raise an error
        # indicating which parameters are not recognized.
        requested_and_extra.update(
            self.classy.get_current_derived_parameters(
                [p for p, v in requested_and_extra.items() if v is None]))
        # Separate the parameters before returning
        # Remember: self.output_params is in sampler nomenclature,
        # but self.derived_extra is in CLASS
        derived = {
            p: requested_and_extra[self.translate_param(p)] for p in self.output_params}
        derived_extra = {p: requested_and_extra[p] for p in self.derived_extra}
        return derived, derived_extra

    def get_Cl(self, ell_factor=False, units="FIRASmuK2"):
        try:
            cls = deepcopy(self._current_state["Cl"])
        except:
            raise LoggedError(
                self.log,
                "No Cl's were computed. Are you sure that you have requested them?")
        # unit conversion and ell_factor
        ells_factor = ((cls["ell"] + 1) * cls["ell"] / (2 * np.pi))[
                      2:] if ell_factor else 1
        units_factor = self._cmb_unit_factor(
            units, self._current_state['derived_extra']['T_cmb'])

        for cl in cls:
            if cl not in ['pp', 'ell']:
                cls[cl][2:] *= units_factor ** 2 * ells_factor
        if "pp" in cls and ell_factor:
            cls['pp'][2:] *= ells_factor ** 2 * (2 * np.pi)
        return cls

    def _get_z_dependent(self, quantity, z):
        try:
            z_name = next(k for k in ["redshifts", "z"]
                          if k in self.collectors[quantity].kwargs)
            computed_redshifts = self.collectors[quantity].kwargs[z_name]
        except StopIteration:
            computed_redshifts = self.collectors[quantity].args[
                self.collectors[quantity].args_names.index("z")]
        i_kwarg_z = np.concatenate(
            [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)])
        values = np.array(deepcopy(self._current_state[quantity]))
        if quantity == "comoving_radial_distance":
            values = values[0]
        return values[i_kwarg_z]

    def close(self):
        self.classy.empty()

    def get_can_provide_params(self):
        names = ['Omega_Lambda', 'Omega_cdm', 'Omega_b', 'Omega_m', 'rs_drag', 'z_reio',
                 'YHe', 'Omega_k', 'age', 'sigma8']
        for name, mapped in self.renames.items():
            if mapped in names:
                names.append(name)
        return names

    def get_version(self):
        return getattr(self.classy_module, '__version__', None)

    # Installation routines

    @classmethod
    def get_path(cls, path):
        return os.path.realpath(os.path.join(path, "code", cls.__name__))

    @classmethod
    def get_import_path(cls, path):
        log = logging.getLogger(cls.__name__)
        classy_build_path = os.path.join(path, "python", "build")
        if not os.path.isdir(classy_build_path):
            log.error("Either CLASS is not in the given folder, "
                      "'%s', or you have not compiled it.", path)
            return None
        py_version = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
        try:
            post = next(d for d in os.listdir(classy_build_path)
                        if (d.startswith("lib.") and py_version in d))
        except StopIteration:
            log.error("The CLASS installation at '%s' has not been compiled for the "
                      "current Python version.", path)
            return None
        return os.path.join(classy_build_path, post)

    @classmethod
    def is_compatible(cls):
        import platform
        if platform.system() == "Windows":
            return False
        return True

    @classmethod
    def is_installed(cls, **kwargs):
        log = logging.getLogger(cls.__name__)
        if not kwargs.get("code", True):
            return True
        path = kwargs["path"]
        if path is not None and path.lower() == "global":
            path = None
        if path and not kwargs.get("allow_global"):
            log.info("Importing *local* CLASS from '%s'.", path)
            if not os.path.exists(path):
                log.error("The given folder does not exist: '%s'", path)
                return False
            classy_build_path = cls.get_import_path(path)
            if not classy_build_path:
                return False
        elif not path:
            log.info("Importing *global* CLASS.")
            classy_build_path = None
        else:
            log.info("Importing *auto-installed* CLASS (but defaulting to *global*).")
            classy_build_path = cls.get_import_path(path)
        try:
            return load_module(
                'classy', path=classy_build_path, min_version=cls._classy_repo_version)
        except ImportError:
            if path is not None and path.lower() != "global":
                log.error("Couldn't find the CLASS python interface at '%s'. "
                          "Are you sure it has been installed there?", path)
            else:
                log.error("Could not import global CLASS installation. "
                          "Specify a Cobaya or CLASS installation path, "
                          "or install the CLASS Python interface globally with "
                          "'cd /path/to/class/python/ ; python setup.py install'")
            return False
        except VersionCheckError as e:
            log.error(str(e))
            return False

    @classmethod
    def install(cls, path=None, force=False, code=True, no_progress_bars=False, **kwargs):
        log = logging.getLogger(cls.__name__)
        if not code:
            log.info("Code not requested. Nothing to do.")
            return True
        log.info("Installing pre-requisites...")
        exit_status = pip_install("cython")
        if exit_status:
            log.error("Could not install pre-requisite: cython")
            return False
        log.info("Downloading classy...")
        success = download_github_release(
            os.path.join(path, "code"), cls._classy_repo_name, cls._classy_repo_version,
            repo_rename=cls.__name__, no_progress_bars=no_progress_bars, logger=log)
        if not success:
            log.error("Could not download classy.")
            return False
        classy_path = cls.get_path(path)
        log.info("Compiling classy...")
        from subprocess import Popen, PIPE
        env = deepcopy(os.environ)
        env.update({"PYTHON": sys.executable})
        process_make = Popen(["make"], cwd=classy_path, stdout=PIPE, stderr=PIPE, env=env)
        out, err = process_make.communicate()
        if process_make.returncode:
            log.info(out)
            log.info(err)
            log.error("Compilation failed!")
            return False
        return True
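
# A standalone sketch (my own, not part of the wrapper above) that reproduces by
# hand the ell_factor / "FIRASmuK2" convention implemented in get_Cl: classy
# returns dimensionless C_ell's, which are rescaled by (T_cmb in muK)^2 and,
# when ell_factor is requested, by the conventional ell*(ell+1)/(2*pi) factor.
import numpy as np
from classy import Class

cosmo = Class()
cosmo.set({'output': 'tCl lCl', 'lensing': 'yes', 'l_max_scalars': 2500})
cosmo.compute()
cls = cosmo.lensed_cl(2500)                      # dict with 'ell', 'tt', ...
ell = cls['ell'][2:]
units_factor = (cosmo.T_cmb() * 1.e6) ** 2       # (T_cmb in muK)^2
ells_factor = ell * (ell + 1) / (2 * np.pi)      # usual D_ell prefactor
D_ell_TT = cls['tt'][2:] * units_factor * ells_factor   # D_ell^TT in muK^2
cosmo.struct_cleanup()
cosmo.empty()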
Example #21
0
class classy(_cosmo):

    def initialize(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # If path not given, try using general path to modules
        if not self.path and self.path_install:
            self.path = os.path.join(
                self.path_install, "code", classy_repo_rename)
        if self.path:
            self.log.info("Importing *local* classy from " + self.path)
            classy_build_path = os.path.join(self.path, "python", "build")
            post = next(d for d in os.listdir(classy_build_path) if d.startswith("lib."))
            classy_build_path = os.path.join(classy_build_path, post)
            if not os.path.exists(classy_build_path):
                self.log.error("Either CLASS is not in the given folder, "
                               "'%s', or you have not compiled it.", self.path)
                raise HandledException
            # Inserting the previously found path into the list of import folders
            sys.path.insert(0, classy_build_path)
        else:
            self.log.info("Importing *global* CLASS.")
        try:
            from classy import Class, CosmoSevereError, CosmoComputationError
        except ImportError:
            self.log.error(
                "Couldn't find the CLASS python interface. "
                "Make sure that you have compiled it, and that you either\n"
                " (a) specify a path (you didn't) or\n"
                " (b) install the Python interface globally with\n"
                "     '/path/to/class/python/python setup.py install --user'")
            raise HandledException
        self.classy = Class()
        # Propagate errors up
        global CosmoComputationError, CosmoSevereError
        # Generate states, to avoid recomputing
        self.n_states = 3
        self.states = [{"params": None, "derived": None, "derived_extra": None,
                        "last": 0} for i in range(self.n_states)]
        # Dict of named tuples to collect requirements and computation methods
        self.collectors = {}
        # Additional input parameters to pass to CLASS
        self.extra_args = self.extra_args or {}
        # Add general CLASS stuff
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (
                self.extra_args["sBBN file"].format(classy=self.path))
        # Set aliases
        self.planck_to_classy = self.renames
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []

    def current_state(self):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        return self.states[lasts.index(max(lasts))]

    def needs(self, **requirements):
        # Computed quantities required by the likelihood
        super(classy, self).needs(**requirements)
        for k, v in self._needs.items():
            # Products and other computations
            if k.lower() == "cl":
                if any([("t" in cl.lower()) for cl in v]):
                    self.extra_args["output"] += " tCl"
                if any([(("e" in cl.lower()) or ("b" in cl.lower())) for cl in v]):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                # For l_max_scalars, remember previous entries.
                self.extra_args["l_max_scalars"] = max(
                    self.extra_args.get("l_max_scalars", 0), max(v.values()))
                self.collectors[k.lower()] = collector(
                    method="lensed_cl", kwargs={"lmax": self.extra_args["l_max_scalars"]})
            elif k.lower() == "h":
                self.collectors[k.lower()] = collector(
                    method="Hubble",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
                self.H_units_conv_factor = {"1/Mpc": 1, "km/s/Mpc": _c_km_s}
            elif k.lower() == "angular_diameter_distance":
                self.collectors[k.lower()] = collector(
                    method="angular_distance",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k.lower() == "comoving_radial_distance":
                self.collectors[k.lower()] = collector(
                    method="z_of_r",
                    args_names=["z"],
                    args=[np.atleast_1d(v["z"])])
            elif k.lower() == "pk_interpolator":
                self.extra_args["output"] += " mPk"
                self.extra_args["P_k_max_h/Mpc"] = max(
                    v.pop("k_max"), self.extra_args.get("P_k_max_h/Mpc", 0))
                self.add_z_for_matter_power(v.pop("z"))
                # Use halofit by default if non-linear requested but no code specified
                if v.get("nonlinear", False) and "non linear" not in self.extra_args:
                    self.extra_args["non linear"] = non_linear_default_code
                for pair in v.pop("vars_pairs", [["delta_tot", "delta_tot"]]):
                    if any([x.lower() != "delta_tot" for x in pair]):
                        self.log.error("NotImplemented in CLASS: %r", pair)
                        raise HandledException
                    self._Pk_interpolator_kwargs = {
                        "logk": True, "extrap_kmax": v.pop("extrap_kmax", None)}
                    name = "Pk_interpolator_%s_%s" % (pair[0], pair[1])
                    self.collectors[name] = collector(
                        method="get_pk_and_k_and_z",
                        kwargs=v,
                        post=(lambda P, k, z:PowerSpectrumInterpolator(
                            z, k, P.T, **self._Pk_interpolator_kwargs)))
            elif v is None:
                k_translated = self.translate_param(k, force=True)
                if k_translated not in self.derived_extra:
                    self.derived_extra += [k_translated]
            else:
                self.log.error("Requested product not known: %r", {k: v})
                raise HandledException
        # Derived parameters (if some need some additional computations)
        if any([("sigma8" in s) for s in self.output_params or requirements]):
            self.extra_args["output"] += " mPk"
            self.extra_args["P_k_max_h/Mpc"] = (
                max(1, self.extra_args.get("P_k_max_h/Mpc", 0)))
        # Adding tensor modes if requested
        if self.extra_args.get("r") or "r" in self.input_params:
            self.extra_args["modes"] = "s,t"
        # If B spectrum with l>50, or lensing, recommend using Halofit
        try:
            cls = self._needs[next(k for k in ["cl", "Cl", "CL"] if k in self._needs)]
        except StopIteration:
            cls = {}
        if (((any([("b" in cl.lower()) for cl in cls]) and
              max([cls[cl] for cl in cls if "b" in cl.lower()]) > 50) or
             any([("p" in cl.lower()) for cl in cls]) and
             not self.extra_args.get("non linear"))):
            self.log.warning("Requesting BB for ell>50 or lensing Cl's: "
                             "using a non-linear code is recommended (and you are not "
                             "using any). To activate it, set "
                             "'non_linear: halofit|hmcode|...' in classy's 'extra_args'.")
        # Cleanup of products string
        self.extra_args["output"] = " ".join(set(self.extra_args["output"].split()))
        # Finally, check that there are no repeated parameters between input and extra
        if set(self.input_params).intersection(set(self.extra_args)):
            self.log.error(
                "The following parameters appear both as input parameters and as CLASS "
                "extra arguments: %s. Please, remove one of the definitions of each.",
                list(set(self.input_params).intersection(set(self.extra_args))))
            raise HandledException

    def add_z_for_matter_power(self, z):
        if not hasattr(self, "z_for_matter_power"):
            self.z_for_matter_power = np.empty((0))
        self.z_for_matter_power = np.flip(np.sort(np.unique(np.concatenate(
            [self.z_for_matter_power, np.atleast_1d(z)]))), axis=0)
        self.extra_args["z_pk"] = " ".join(["%g" % zi for zi in self.z_for_matter_power])

    def translate_param(self, p, force=False):
        # "force=True" is used when communicating with likelihoods, which speak "planck"
        if self.use_planck_names or force:
            return self.planck_to_classy.get(p, p)
        return p

    def set(self, params_values_dict, i_state):
        # Store them, to use them later to identify the state
        self.states[i_state]["params"] = deepcopy(params_values_dict)
        # Prepare parameters to be passed: this-iteration + extra
        args = {self.translate_param(p): v for p, v in params_values_dict.items()}
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.struct_cleanup()
        self.classy.set(**args)

    def compute(self, _derived=None, cached=True, **params_values_dict):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        try:
            if not cached:
                raise StopIteration
            # are the parameter values there already?
            i_state = next(i for i in range(self.n_states)
                           if self.states[i]["params"] == params_values_dict)
            # has any new product been requested?
            for product in self.collectors:
                next(k for k in self.states[i_state] if k == product)
            reused_state = True
            # Get (pre-computed) derived parameters
            if _derived == {}:
                _derived.update(self.states[i_state]["derived"])
            self.log.debug("Re-using computed results (state %d)", i_state)
        except StopIteration:
            reused_state = False
            # update the (first) oldest one and compute
            i_state = lasts.index(min(lasts))
            self.log.debug("Computing (state %d)", i_state)
            if self.timing:
                a = time()
            # Set parameters
            self.set(params_values_dict, i_state)
            # Compute!
            try:
                self.classy.compute()
            # "Valid" failure of CLASS: parameters too extreme -> log and report
            except CosmoComputationError:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on.")
                return 0
            # CLASS not correctly initialized, or input parameters not correct
            except CosmoSevereError:
                self.log.error("Serious error setting parameters or computing results. "
                               "The parameters passed were %r and %r. "
                               "See original CLASS's error traceback below.\n",
                               self.states[i_state]["params"], self.extra_args)
                raise  # No HandledException, so that CLASS traceback gets printed
            # Gather products
            for product, collector in self.collectors.items():
                # Special case: sigma8 needs H0, which cannot be known beforehand:
                if "sigma8" in self.collectors:
                    self.collectors["sigma8"].args[0] = 8 / self.classy.h()
                method = getattr(self.classy, collector.method)
                arg_array = self.collectors[product].arg_array
                if arg_array is None:
                    self.states[i_state][product] = method(
                        *self.collectors[product].args, **self.collectors[product].kwargs)
                elif isinstance(arg_array, Number):
                    self.states[i_state][product] = np.zeros(
                        len(self.collectors[product].args[arg_array]))
                    for i, v in enumerate(self.collectors[product].args[arg_array]):
                        args = (list(self.collectors[product].args[:arg_array]) + [v] +
                                list(self.collectors[product].args[arg_array + 1:]))
                        self.states[i_state][product][i] = method(
                            *args, **self.collectors[product].kwargs)
                elif arg_array in self.collectors[product].kwargs:
                    value = np.atleast_1d(self.collectors[product].kwargs[arg_array])
                    self.states[i_state][product] = np.zeros(value.shape)
                    for i, v in enumerate(value):
                        kwargs = deepcopy(self.collectors[product].kwargs)
                        kwargs[arg_array] = v
                        self.states[i_state][product][i] = method(
                            *self.collectors[product].args, **kwargs)
                self.states[i_state][product] = collector.post(
                    self.states[i_state][product])
            # Prepare derived parameters
            d, d_extra = self.get_derived_all(derived_requested=(_derived == {}))
            if _derived == {}:
                _derived.update(d)
            self.states[i_state]["derived"] = odict(
                [[p, _derived.get(p)] for p in self.output_params])
            # Prepare necessary extra derived parameters
            self.states[i_state]["derived_extra"] = deepcopy(d_extra)
            if self.timing:
                self.n += 1
                self.time_avg = (time() - a + self.time_avg * (self.n - 1)) / self.n
                self.log.debug("Average running time: %g seconds", self.time_avg)
        # make this one the current one by decreasing the antiquity of the rest
        for i in range(self.n_states):
            self.states[i]["last"] -= max(lasts)
        self.states[i_state]["last"] = 1
        return 1 if reused_state else 2

    def get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``compute`` method).

        Parameter names are returned in CLASS nomenclature.

        To get a parameter *from a likelihood* use `get_param` instead.
        """
        # Put all parameters in CLASS nomenclature (self.derived_extra already is)
        requested = [self.translate_param(p) for p in (
            self.output_params if derived_requested else [])]
        requested_and_extra = {
            p: None for p in set(requested).union(set(self.derived_extra))}
        # Parameters with their own getters
        if "rs_drag" in requested_and_extra:
            requested_and_extra["rs_drag"] = self.classy.rs_drag()
        elif "Omega_nu" in requested_and_extra:
            requested_and_extra["Omega_nu"] = self.classy.Omega_nu
        # Get the rest using the general derived param getter
        # No need for error control: classy.get_current_derived_parameters is passed
        # every derived parameter not already handled above, and it will itself raise
        # an error indicating which parameters are not recognized
        requested_and_extra.update(
            self.classy.get_current_derived_parameters(
                [p for p, v in requested_and_extra.items() if v is None]))
        # Separate the parameters before returning
        # Remember: self.output_params is in sampler nomenclature,
        # but self.derived_extra is in CLASS
        derived = {
            p: requested_and_extra[self.translate_param(p)] for p in self.output_params}
        derived_extra = {p: requested_and_extra[p] for p in self.derived_extra}
        return derived, derived_extra

    def get_param(self, p):
        current_state = self.current_state()
        for pool in ["params", "derived", "derived_extra"]:
            value = deepcopy(
                current_state[pool].get(self.translate_param(p, force=True), None))
            if value is not None:
                return value
        self.log.error("Parameter not known: '%s'", p)
        raise HandledException

    def get_cl(self, ell_factor=False, units="muK2"):
        current_state = self.current_state()
        try:
            cls = deepcopy(current_state["cl"])
        except Exception:
            self.log.error(
                "No Cl's were computed. Are you sure that you have requested them?")
            raise HandledException
        # unit conversion and ell_factor
        ells_factor = ((cls["ell"] + 1) * cls["ell"] / (2 * np.pi))[2:] if ell_factor else 1
        units_factors = {"1": 1,
                         "muK2": _T_CMB_K * 1.e6,
                         "K2": _T_CMB_K}
        try:
            units_factor = units_factors[units]
        except KeyError:
            self.log.error("Units '%s' not recognized. Use one of %s.",
                           units, list(units_factors))
            raise HandledException
        for cl in cls:
            if cl not in ['pp', 'ell']:
                cls[cl][2:] *= units_factor ** 2 * ells_factor
        if "pp" in cls and ell_factor:
            cls['pp'][2:] *= ells_factor ** 2 * (2 * np.pi)
        return cls

    def _get_z_dependent(self, quantity, z):
        try:
            z_name = next(k for k in ["redshifts", "z"]
                          if k in self.collectors[quantity].kwargs)
            computed_redshifts = self.collectors[quantity].kwargs[z_name]
        except StopIteration:
            computed_redshifts = self.collectors[quantity].args[
                self.collectors[quantity].args_names.index("z")]
        i_kwarg_z = np.concatenate(
            [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)])
        values = np.array(deepcopy(self.current_state()[quantity]))
        if quantity == "comoving_radial_distance":
            values = values[0]
        return values[i_kwarg_z]

    def get_H(self, z, units="km/s/Mpc"):
        try:
            return self._get_z_dependent("h", z) * self.H_units_conv_factor[units]
        except KeyError:
            self.log.error("Units not known for H: '%s'. Try instead one of %r.",
                           units, list(self.H_units_conv_factor))
            raise HandledException

    def get_angular_diameter_distance(self, z):
        return self._get_z_dependent("angular_diameter_distance", z)

    def get_comoving_radial_distance(self, z):
        return self._get_z_dependent("comoving_radial_distance", z)

    def get_Pk_interpolator(self):
        current_state = self.current_state()
        prefix = "Pk_interpolator_"
        return {k[len(prefix):]: deepcopy(v)
                for k, v in current_state.items() if k.startswith(prefix)}

    def close(self):
        self.classy.struct_cleanup()
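
# A small standalone illustration (my own, mirroring add_z_for_matter_power above):
# successive redshift requests are merged, de-duplicated and sorted in decreasing z
# before being handed to CLASS as the space-separated 'z_pk' string.
import numpy as np

z_for_matter_power = np.empty((0,))
for z_new in ([0.5, 1.0], [0.0, 0.5, 2.0]):
    z_for_matter_power = np.flip(np.sort(np.unique(np.concatenate(
        [z_for_matter_power, np.atleast_1d(z_new)]))), axis=0)
print(" ".join("%g" % zi for zi in z_for_matter_power))   # -> "2 1 0.5 0"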
Example #22
0
from classy import Class

#Lambda CDM
LCDM = Class()
LCDM.set({'Omega_cdm': 0.25, 'Omega_b': 0.05})
LCDM.compute()

# In[ ]:

#Einstein-de Sitter
CDM = Class()
CDM.set({'Omega_cdm': 0.95, 'Omega_b': 0.05})
CDM.compute()

# Just to cross-check that Omega_Lambda is negligible
# (but not exactly zero because we neglected radiation)
derived = CDM.get_current_derived_parameters(['Omega0_lambda'])
print(derived)
print("Omega_Lambda =", derived['Omega0_lambda'])

# In[ ]:

#Get background quantities and recover their names:
baLCDM = LCDM.get_background()
baCDM = CDM.get_background()
print(baCDM.keys())

# In[ ]:

#Get H_0 in order to plot the distances in this unit
fLCDM = LCDM.Hubble(0)
fCDM = CDM.Hubble(0)
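
# A possible continuation (my own sketch, not in the original snippet): plot the
# angular diameter distance from the background tables in units of c/H_0, i.e.
# rescaled by the H_0 values obtained just above.
import matplotlib.pyplot as plt

plt.loglog(baLCDM['z'], fLCDM * baLCDM['ang.diam.dist.'], label='LCDM')
plt.loglog(baCDM['z'], fCDM * baCDM['ang.diam.dist.'], label='Einstein-de Sitter')
plt.xlabel('z')
plt.ylabel(r'$d_A \, H_0$')
plt.legend()
plt.show()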
Example #23
0
#
# clM = M.raw_cl(2500)
clM = M.lensed_cl(2500)
ll = clM['ell'][2:]
clTT = clM['tt'][2:]
clTE = clM['te'][2:]
clEE = clM['ee'][2:]
bg = M.get_background()
Omega_scf = bg['(.)Omega_scf']
H = bg['H [1/Mpc]']
Da = bg['ang.diam.dist.']
z = bg['z']
tau = bg['conf. time [Mpc]']
tau_interp = interp1d(z, tau)
tau_0 = tau_interp(0)
derived = M.get_current_derived_parameters(['z_rec', 'tau_rec'])
tau_rec = derived['tau_rec']
for j in range(len(Omega_scf)):
    if Omega_scf[j] > 0.01:
        k = H[j] / (1 + z[j])
        # l.append(k*Da[j])
        l.append(k * (tau_0 - tau_rec))
print(450. / (tau_0 - tau_rec))
# store P(k) for common k values
lmax = max(l)
lmin = min(l)
l = []
l.append(lmin)
l.append(2 * lmax)
if plot_pk:
    pkM = []
Example #24
0
from classy import Class

#Lambda CDM
LCDM = Class()
LCDM.set({'Omega_cdm':0.25,'Omega_b':0.05})
LCDM.compute()


# In[ ]:

#Einstein-de Sitter
CDM = Class()
CDM.set({'Omega_cdm':0.95,'Omega_b':0.05})
CDM.compute()

# Just to cross-check that Omega_Lambda is negligible
# (but not exactly zero because we neglected radiation)
derived = CDM.get_current_derived_parameters(['Omega0_lambda'])
print(derived)
print("Omega_Lambda =", derived['Omega0_lambda'])


# In[ ]:

#Get background quantities and recover their names:
baLCDM = LCDM.get_background()
baCDM = CDM.get_background()
print(baCDM.keys())


# In[ ]:

#Get H_0 in order to plot the distances in this unit
Example #25
0
    'tau_reio': 0.0925,
    # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
    'YHe': 0.246,
    # other output and precision parameters
    'l_max_scalars': 5000,
    'P_k_max_1/Mpc': 10.0,
    'gauge': 'newtonian'
}
###############
#
# call CLASS a first time just to compute z_rec (will compute transfer functions at default: z=0)
#
M = Class()
M.set(common_settings)
M.compute()
derived = M.get_current_derived_parameters(
    ['z_rec', 'tau_rec', 'conformal_age'])
# print(derived.keys())
z_rec = derived['z_rec']
z_rec = int(1000. * z_rec) / 1000.  # truncate to 3 digits after the decimal point
M.struct_cleanup()  # clean output
M.empty()  # clean input
#
# call CLASS again (will compute transfer functions at the input value z_rec)
#
M = Class()
M.set(common_settings)
M.set({'z_pk': z_rec})
M.compute()
#
# load transfer functions at recombination
#
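# One way the announced step could look (my own sketch; it assumes the truncated
# common_settings above request transfer-function output, e.g. 'mTk'):
one_time = M.get_transfer(z_rec)        # dict of transfer functions at z = z_rec
print(one_time.keys())                  # e.g. 'k (h/Mpc)', 'd_g', 'd_b', 'd_cdm', ...
k = one_time['k (h/Mpc)']
delta_g = one_time['d_g']               # photon density transfer function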
Example #26
0
Theta0 = 0.25*one_k['delta_g']
phi = one_k['phi']
psi = one_k['psi']
theta_b = one_k['theta_b']
a = one_k['a']
# compute related quantities
R = 3./4.*M.Omega_b()/M.Omega_g()*a    # R = 3/4 * (rho_b/rho_gamma)
zero_point = -(1.+R)*psi               # zero point of oscillations: -(1.+R)*psi
#
# get Theta0 oscillation amplitude (for vertical scale of plot)
#
Theta0_amp = max(Theta0.max(),-Theta0.min())
#
# get the time of decoupling
#
quantities = M.get_current_derived_parameters(['tau_rec'])
# print(quantities.keys())
tau_rec = quantities['tau_rec']
#
# use table of background quantities to find the time of
# Hubble crossing (k / (aH) = 2 pi) and sound horizon crossing (k * rs = 2 pi)
#
background = M.get_background() # load background table
# print(background.keys())
#
background_tau = background['conf. time [Mpc]'] # read conformal times in background table
background_z = background['z'] # read redshift
background_k_over_aH = k/background['H [1/Mpc]']*(1.+background['z']) # read k/aH = k(1+z)/H
background_k_rs = k * background['comov.snd.hrz.'] # read k * rs
background_rho_m_over_r = (
    (background['(.)rho_b'] + background['(.)rho_cdm'])
    / (background['(.)rho_g'] + background['(.)rho_ur']))  # read rho_m / rho_r (to find time of equality)
#
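# A possible continuation (my own sketch, not part of the original fragment):
# locate the background table entries closest to the crossing conditions
# announced in the comments above (nearest grid point, no interpolation).
import numpy as np

i_hubble = np.argmin(np.abs(background_k_over_aH - 2. * np.pi))
tau_hubble_crossing = background_tau[i_hubble]      # conformal time of k/(aH) = 2 pi
i_rs = np.argmin(np.abs(background_k_rs - 2. * np.pi))
tau_rs_crossing = background_tau[i_rs]              # conformal time of k*rs = 2 pi
i_eq = np.argmin(np.abs(background_rho_m_over_r - 1.))
z_eq = background_z[i_eq]                           # redshift of matter-radiation equality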