Example #1
    def loglkl(self, params):
        cosmo = Class()
        cosmo.set(params)
        cosmo.compute()

        chi2 = 0.

        # for each point, compute angular distance da, radial distance dr,
        # volume distance dv, sound horizon at baryon drag rs_d,
        # theoretical prediction and chi2 contribution
        for i in range(self.num_points):

            da = cosmo.angular_distance(self.z[i])
            dr = self.z[i] / cosmo.Hubble(self.z[i])
            dv = pow(da * da * (1 + self.z[i]) * (1 + self.z[i]) * dr, 1. / 3.)
            rs = cosmo.rs_drag()

            if self.type[i] == 3:
                theo = dv / rs

            elif self.type[i] == 4:
                theo = dv

            elif self.type[i] == 5:
                theo = da / rs

            elif self.type[i] == 6:
                theo = 1. / cosmo.Hubble(self.z[i]) / rs

            elif self.type[i] == 7:
                theo = rs / dv

            else:
                raise ValueError("unknown BAO data type %s" % self.type[i])

            chi2 += ((theo - self.data[i]) / self.error[i]) ** 2

        # note: this returns chi2 = -2 ln(L), not ln(L);
        # for ln(L) itself, use lkl = -0.5 * chi2 instead
        lkl = chi2

        return lkl
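
A minimal standalone sketch of the same D_V / r_s computation, assuming only that classy is installed; the parameter values and the redshift below are illustrative placeholders, not the likelihood's actual data:

from classy import Class

cosmo = Class()
# hypothetical flat-LCDM input; any valid CLASS parameter dict works here
cosmo.set({'h': 0.67, 'omega_b': 0.022, 'omega_cdm': 0.12, 'output': ''})
cosmo.compute()

z = 0.57
da = cosmo.angular_distance(z)                   # angular diameter distance [Mpc]
dr = z / cosmo.Hubble(z)                         # Hubble(z) is in 1/Mpc, so dr is in Mpc
dv = (da * da * (1 + z) ** 2 * dr) ** (1. / 3.)  # volume-averaged distance D_V
print(dv / cosmo.rs_drag())                      # the type == 3 observable, D_V / r_s

cosmo.struct_cleanup()
cosmo.empty()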
Example #2
def get_bao_rs_dV(zs,params=None,engine='camb',de='ppf'):
    #FIXME: camb and class only agree at 3% level!!!
    import numpy as np
    import camb
    params = map_params(params,engine=engine)
    if engine=='camb':
        pars = set_camb_pars(params=params,de=de)
        results = camb.get_results(pars)
        retval = results.get_BAO(zs,pars)[:,0]
    elif engine=='class':
        from classy import Class
        zs = np.asarray(zs)
        cosmo = Class()
        params['output'] = ''
        cosmo.set(params)
        cosmo.compute()
        Hzs = np.array([cosmo.Hubble(z) for z in zs])
        D_As = np.array([cosmo.angular_distance(z) for z in zs])
        D_Vs = ((1+zs)**2 * D_As**2 * zs/Hzs)**(1/3.)
        retval = cosmo.rs_drag()/D_Vs
        cosmo.struct_cleanup()
        cosmo.empty()
    return retval
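
Given the FIXME above about the ~3% CAMB/CLASS disagreement, a standalone cross-check of the 'class' branch can be useful. The following sketch assumes only classy and numpy; the parameter dict is an illustrative placeholder, and the original module's map_params/set_camb_pars helpers are deliberately bypassed:

import numpy as np
from classy import Class

def rs_over_dV_class(zs, params):
    # CLASS-only reimplementation of the 'class' branch above
    zs = np.asarray(zs, dtype=float)
    cosmo = Class()
    cosmo.set(dict(params, output=''))
    cosmo.compute()
    Hzs = np.array([cosmo.Hubble(z) for z in zs])            # H(z) in 1/Mpc
    D_As = np.array([cosmo.angular_distance(z) for z in zs])
    D_Vs = ((1 + zs) ** 2 * D_As ** 2 * zs / Hzs) ** (1 / 3.)
    result = cosmo.rs_drag() / D_Vs
    cosmo.struct_cleanup()
    cosmo.empty()
    return result

print(rs_over_dV_class([0.38, 0.51, 0.61],
                       {'h': 0.67, 'omega_b': 0.022, 'omega_cdm': 0.12}))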
Example #3
class classy(_cosmo):

    def initialize(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # If path not given, try using general path to modules
        if not self.path and self.path_install:
            self.path = os.path.join(
                self.path_install, "code", classy_repo_rename)
        if self.path:
            self.log.info("Importing *local* classy from " + self.path)
            classy_build_path = os.path.join(self.path, "python", "build")
            post = next(d for d in os.listdir(classy_build_path) if d.startswith("lib."))
            classy_build_path = os.path.join(classy_build_path, post)
            if not os.path.exists(classy_build_path):
                self.log.error("Either CLASS is not in the given folder, "
                               "'%s', or you have not compiled it.", self.path)
                raise HandledException
            # Inserting the previously found path into the list of import folders
            sys.path.insert(0, classy_build_path)
        else:
            self.log.info("Importing *global* CLASS.")
        try:
            from classy import Class, CosmoSevereError, CosmoComputationError
        except ImportError:
            self.log.error(
                "Couldn't find the CLASS python interface. "
                "Make sure that you have compiled it, and that you either\n"
                " (a) specify a path (you didn't) or\n"
                " (b) install the Python interface globally with\n"
                "     '/path/to/class/python/python setup.py install --user'")
            raise HandledException
        self.classy = Class()
        # Propagate errors up
        global CosmoComputationError, CosmoSevereError
        # Generate states, to avoid recomputing
        self.n_states = 3
        self.states = [{"params": None, "derived": None, "derived_extra": None,
                        "last": 0} for i in range(self.n_states)]
        # Dict of named tuples to collect requirements and computation methods
        self.collectors = {}
        # Additional input parameters to pass to CLASS
        self.extra_args = self.extra_args or {}
        # Add general CLASS stuff
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (
                self.extra_args["sBBN file"].format(classy=self.path))
        # Set aliases
        self.planck_to_classy = self.renames
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []

    def current_state(self):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        return self.states[lasts.index(max(lasts))]

    def needs(self, **requirements):
        # Computed quantities required by the likelihood
        super(classy, self).needs(**requirements)
        for k, v in self._needs.items():
            # Products and other computations
            if k.lower() == "cl":
                if any([("t" in cl.lower()) for cl in v]):
                    self.extra_args["output"] += " tCl"
                if any([(("e" in cl.lower()) or ("b" in cl.lower())) for cl in v]):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                # For l_max_scalars, remember previous entries.
                self.extra_args["l_max_scalars"] = max(v.values())
                self.collectors[k.lower()] = collector(
                    method="lensed_cl", kwargs={"lmax": self.extra_args["l_max_scalars"]})
            elif k.lower() == "h":
                self.collectors[k.lower()] = collector(
                    method="Hubble",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
                self.H_units_conv_factor = {"1/Mpc": 1, "km/s/Mpc": _c_km_s}
            elif k.lower() == "angular_diameter_distance":
                self.collectors[k.lower()] = collector(
                    method="angular_distance",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k.lower() == "comoving_radial_distance":
                self.collectors[k.lower()] = collector(
                    method="z_of_r",
                    args_names=["z"],
                    args=[np.atleast_1d(v["z"])])
            elif k.lower() == "pk_interpolator":
                self.extra_args["output"] += " mPk"
                self.extra_args["P_k_max_h/Mpc"] = max(
                    v.pop("k_max"), self.extra_args.get("P_k_max_h/Mpc", 0))
                self.add_z_for_matter_power(v.pop("z"))
                # Use halofit by default if non-linear requested but no code specified
                if v.get("nonlinear", False) and "non linear" not in self.extra_args:
                    self.extra_args["non linear"] = non_linear_default_code
                for pair in v.pop("vars_pairs", [["delta_tot", "delta_tot"]]):
                    if any([x.lower() != "delta_tot" for x in pair]):
                        self.log.error("NotImplemented in CLASS: %r", pair)
                        raise HandledException
                    self._Pk_interpolator_kwargs = {
                        "logk": True, "extrap_kmax": v.pop("extrap_kmax", None)}
                    name = "Pk_interpolator_%s_%s" % (pair[0], pair[1])
                    self.collectors[name] = collector(
                        method="get_pk_and_k_and_z",
                        kwargs=v,
                        post=(lambda P, k, z: PowerSpectrumInterpolator(
                            z, k, P.T, **self._Pk_interpolator_kwargs)))
            elif v is None:
                k_translated = self.translate_param(k, force=True)
                if k_translated not in self.derived_extra:
                    self.derived_extra += [k_translated]
            else:
                self.log.error("Requested product not known: %r", {k: v})
                raise HandledException
        # Derived parameters (if some need some additional computations)
        if any([("sigma8" in s) for s in self.output_params or requirements]):
            self.extra_args["output"] += " mPk"
            self.extra_args["P_k_max_h/Mpc"] = (
                max(1, self.extra_args.get("P_k_max_h/Mpc", 0)))
        # Adding tensor modes if requested
        if self.extra_args.get("r") or "r" in self.input_params:
            self.extra_args["modes"] = "s,t"
        # If B spectrum with l>50, or lensing, recommend using Halofit
        try:
            cls = self.needs[next(k for k in ["cl", "Cl", "CL"] if k in self._needs)]
        except StopIteration:
            cls = {}
        if (((any([("b" in cl.lower()) for cl in cls]) and
              max([cls[cl] for cl in cls if "b" in cl.lower()]) > 50) or
             any([("p" in cl.lower()) for cl in cls]) and
             not self.extra_args.get("non linear"))):
            self.log.warning("Requesting BB for ell>50 or lensing Cl's: "
                             "using a non-linear code is recommended (and you are not "
                             "using any). To activate it, set "
                             "'non_linear: halofit|hmcode|...' in classy's 'extra_args'.")
        # Cleanup of products string
        self.extra_args["output"] = " ".join(set(self.extra_args["output"].split()))
        # Finally, check that there are no repeated parameters between input and extra
        if set(self.input_params).intersection(set(self.extra_args)):
            self.log.error(
                "The following parameters appear both as input parameters and as CLASS "
                "extra arguments: %s. Please, remove one of the definitions of each.",
                list(set(self.input_params).intersection(set(self.extra_args))))
            raise HandledException

    def add_z_for_matter_power(self, z):
        if not hasattr(self, "z_for_matter_power"):
            self.z_for_matter_power = np.empty((0))
        self.z_for_matter_power = np.flip(np.sort(np.unique(np.concatenate(
            [self.z_for_matter_power, np.atleast_1d(z)]))), axis=0)
        self.extra_args["z_pk"] = " ".join(["%g" % zi for zi in self.z_for_matter_power])

    def translate_param(self, p, force=False):
        # "force=True" is used when communicating with likelihoods, which speak "planck"
        if self.use_planck_names or force:
            return self.planck_to_classy.get(p, p)
        return p

    def set(self, params_values_dict, i_state):
        # Store them, to use them later to identify the state
        self.states[i_state]["params"] = deepcopy(params_values_dict)
        # Prepare parameters to be passed: this-iteration + extra
        args = {self.translate_param(p): v for p, v in params_values_dict.items()}
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.struct_cleanup()
        self.classy.set(**args)

    def compute(self, _derived=None, cached=True, **params_values_dict):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        try:
            if not cached:
                raise StopIteration
            # are the parameter values there already?
            i_state = next(i for i in range(self.n_states)
                           if self.states[i]["params"] == params_values_dict)
            # has any new product been requested?
            for product in self.collectors:
                next(k for k in self.states[i_state] if k == product)
            reused_state = True
            # Get (pre-computed) derived parameters
            if _derived == {}:
                _derived.update(self.states[i_state]["derived"])
            self.log.debug("Re-using computed results (state %d)", i_state)
        except StopIteration:
            reused_state = False
            # update the (first) oldest one and compute
            i_state = lasts.index(min(lasts))
            self.log.debug("Computing (state %d)", i_state)
            if self.timing:
                a = time()
            # Set parameters
            self.set(params_values_dict, i_state)
            # Compute!
            try:
                self.classy.compute()
            # "Valid" failure of CLASS: parameters too extreme -> log and report
            except CosmoComputationError:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on.")
                return 0
            # CLASS not correctly initialized, or input parameters not correct
            except CosmoSevereError:
                self.log.error("Serious error setting parameters or computing results. "
                               "The parameters passed were %r and %r. "
                               "See original CLASS's error traceback below.\n",
                               self.states[i_state]["params"], self.extra_args)
                raise  # No HandledException, so that CLASS traceback gets printed
            # Gather products
            for product, collector in self.collectors.items():
                # Special case: sigma8 needs H0, which cannot be known beforehand:
                if "sigma8" in self.collectors:
                    self.collectors["sigma8"].args[0] = 8 / self.classy.h()
                method = getattr(self.classy, collector.method)
                arg_array = self.collectors[product].arg_array
                if arg_array is None:
                    self.states[i_state][product] = method(
                        *self.collectors[product].args, **self.collectors[product].kwargs)
                elif isinstance(arg_array, Number):
                    self.states[i_state][product] = np.zeros(
                        len(self.collectors[product].args[arg_array]))
                    for i, v in enumerate(self.collectors[product].args[arg_array]):
                        args = (list(self.collectors[product].args[:arg_array]) + [v] +
                                list(self.collectors[product].args[arg_array + 1:]))
                        self.states[i_state][product][i] = method(
                            *args, **self.collectors[product].kwargs)
                elif arg_array in self.collectors[product].kwargs:
                    value = np.atleast_1d(self.collectors[product].kwargs[arg_array])
                    self.states[i_state][product] = np.zeros(value.shape)
                    for i, v in enumerate(value):
                        kwargs = deepcopy(self.collectors[product].kwargs)
                        kwargs[arg_array] = v
                        self.states[i_state][product][i] = method(
                            *self.collectors[product].args, **kwargs)
                if collector.post:
                    self.states[i_state][product] = collector.post(
                        *self.states[i_state][product])
            # Prepare derived parameters
            d, d_extra = self.get_derived_all(derived_requested=(_derived == {}))
            if _derived == {}:
                _derived.update(d)
            self.states[i_state]["derived"] = odict(
                [[p, _derived.get(p)] for p in self.output_params])
            # Prepare necessary extra derived parameters
            self.states[i_state]["derived_extra"] = deepcopy(d_extra)
            if self.timing:
                self.n += 1
                self.time_avg = (time() - a + self.time_avg * (self.n - 1)) / self.n
                self.log.debug("Average running time: %g seconds", self.time_avg)
        # make this one the current one by decreasing the antiquity of the rest
        for i in range(self.n_states):
            self.states[i]["last"] -= max(lasts)
        self.states[i_state]["last"] = 1
        return 1 if reused_state else 2

    def get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``compute`` method).

        Parameter names are returned in CLASS nomenclature.

        To get a parameter *from a likelihood* use `get_param` instead.
        """
        # Put all parameters in CLASS nomenclature (self.derived_extra already is)
        requested = [self.translate_param(p) for p in (
            self.output_params if derived_requested else [])]
        requested_and_extra = {
            p: None for p in set(requested).union(set(self.derived_extra))}
        # Parameters with their own getters
        if "rs_drag" in requested_and_extra:
            requested_and_extra["rs_drag"] = self.classy.rs_drag()
        elif "Omega_nu" in requested_and_extra:
            requested_and_extra["Omega_nu"] = self.classy.Omega_nu
        # Get the rest using the general derived param getter
        # No need for error control: classy.get_current_derived_parameters is passed
        # every derived parameter not excluded before, and causes an error indicating
        # which parameters are not recognized
        requested_and_extra.update(
            self.classy.get_current_derived_parameters(
                [p for p, v in requested_and_extra.items() if v is None]))
        # Separate the parameters before returning
        # Remember: self.output_params is in sampler nomenclature,
        # but self.derived_extra is in CLASS
        derived = {
            p: requested_and_extra[self.translate_param(p)] for p in self.output_params}
        derived_extra = {p: requested_and_extra[p] for p in self.derived_extra}
        return derived, derived_extra

    def get_param(self, p):
        current_state = self.current_state()
        for pool in ["params", "derived", "derived_extra"]:
            value = deepcopy(
                current_state[pool].get(self.translate_param(p, force=True), None))
            if value is not None:
                return value
        self.log.error("Parameter not known: '%s'", p)
        raise HandledException

    def get_cl(self, ell_factor=False, units="muK2"):
        current_state = self.current_state()
        try:
            cls = deepcopy(current_state["cl"])
        except KeyError:
            self.log.error(
                "No Cl's were computed. Are you sure that you have requested them?")
            raise HandledException
        # unit conversion and ell_factor
        ell_factor = ((cls["ell"] + 1) * cls["ell"] / (2 * np.pi))[2:] if ell_factor else 1
        units_factors = {"1": 1,
                         "muK2": _T_CMB_K * 1.e6,
                         "K2": _T_CMB_K}
        try:
            units_factor = units_factors[units]
        except KeyError:
            self.log.error("Units '%s' not recognized. Use one of %s.",
                           units, list(units_factors))
            raise HandledException
        for cl in cls:
            if cl not in ['pp', 'ell']:
                cls[cl][2:] *= units_factor ** 2 * ell_factor
        if "pp" in cls and ell_factor is not 1:
            cls['pp'][2:] *= ell_factor ** 2 * (2 * np.pi)
        return cls

    def _get_z_dependent(self, quantity, z):
        try:
            z_name = next(k for k in ["redshifts", "z"]
                          if k in self.collectors[quantity].kwargs)
            computed_redshifts = self.collectors[quantity].kwargs[z_name]
        except StopIteration:
            computed_redshifts = self.collectors[quantity].args[
                self.collectors[quantity].args_names.index("z")]
        i_kwarg_z = np.concatenate(
            [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)])
        values = np.array(deepcopy(self.current_state()[quantity]))
        if quantity == "comoving_radial_distance":
            values = values[0]
        return values[i_kwarg_z]

    def get_H(self, z, units="km/s/Mpc"):
        try:
            return self._get_z_dependent("h", z) * self.H_units_conv_factor[units]
        except KeyError:
            self.log.error("Units not known for H: '%s'. Try instead one of %r.",
                           units, list(self.H_units_conv_factor))
            raise HandledException

    def get_angular_diameter_distance(self, z):
        return self._get_z_dependent("angular_diameter_distance", z)

    def get_comoving_radial_distance(self, z):
        return self._get_z_dependent("comoving_radial_distance", z)

    def get_Pk_interpolator(self):
        current_state = self.current_state()
        prefix = "Pk_interpolator_"
        return {k[len(prefix):]: deepcopy(v)
                for k, v in current_state.items() if k.startswith(prefix)}

    def close(self):
        self.classy.struct_cleanup()
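
The collector named tuple used throughout this wrapper is defined at module level and not shown in the excerpt. A plausible reconstruction, inferred purely from the fields the code accesses (method is the only mandatory one; args, args_names, kwargs, arg_array and post get harmless defaults):

from collections import namedtuple

# hypothetical reconstruction; the real definition lives elsewhere in the module
collector = namedtuple(
    "collector", ["method", "args", "args_names", "kwargs", "arg_array", "post"])
collector.__new__.__defaults__ = ([], [], {}, None, None)

With post defaulting to None, compute applies it only when a collector actually sets one, e.g. the Pk_interpolator entry.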
Example #4
class classy(Theory):
    def initialise(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # If path not given, try using general path to modules
        path_to_installation = get_path_to_installation()
        if not self.path and path_to_installation:
            self.path = os.path.join(path_to_installation, "code",
                                     classy_repo_rename)
        if self.path:
            self.log.info("Importing *local* classy from " + self.path)
            classy_build_path = os.path.join(self.path, "python", "build")
            post = next(d for d in os.listdir(classy_build_path)
                        if d.startswith("lib."))
            classy_build_path = os.path.join(classy_build_path, post)
            if not os.path.exists(classy_build_path):
                self.log.error(
                    "Either CLASS is not in the given folder, "
                    "'%s', or you have not compiled it.", self.path)
                raise HandledException
            # Inserting the previously found path into the list of import folders
            sys.path.insert(0, classy_build_path)
        else:
            self.log.info("Importing *global* CLASS.")
        try:
            from classy import Class, CosmoSevereError, CosmoComputationError
        except ImportError:
            self.log.error(
                "Couldn't find the CLASS python interface. "
                "Make sure that you have compiled it, and that you either\n"
                " (a) specify a path (you didn't) or\n"
                " (b) install the Python interface globally with\n"
                "     '/path/to/class/python/python setup.py install --user'")
            raise HandledException
        self.classy = Class()
        # Propagate errors up
        global CosmoComputationError, CosmoSevereError
        # Generate states, to avoid recomputing
        self.n_states = 3
        self.states = [{
            "params": None,
            "derived": None,
            "derived_extra": None,
            "last": 0
        } for i in range(self.n_states)]
        # Dict of named tuples to collect requirements and computation methods
        self.collectors = {}
        # Additional input parameters to pass to CLASS
        self.extra_args = self.extra_args or {}
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (os.path.join(
                self.path, self.extra_args["sBBN file"]))
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []

    def current_state(self):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        return self.states[lasts.index(max(lasts))]

    def needs(self, arguments):
        # Computed quantities required by the likelihood
        arguments = arguments or {}
        for k, v in arguments.items():
            # Precision parameters and boundaries (in general, take max of all requested)
            if k == "l_max":
                self.extra_args["l_max_scalars"] = (max(
                    v, self.extra_args.get("l_max_scalars", 0)))
            elif k == "k_max":
                self.extra_args["P_k_max_h/Mpc"] = (max(
                    v, self.extra_args.get("P_k_max_h/Mpc", 0)))
            # Products and other computations
            elif k == "Cl":
                if any([("t" in cl.lower()) for cl in v]):
                    self.extra_args["output"] += " tCl"
                if any([(("e" in cl.lower()) or ("b" in cl.lower()))
                        for cl in v]):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                self.extra_args["non linear"] = "halofit"
                self.collectors[k] = collector(method="lensed_cl", kwargs={})
                self.collectors["TCMB"] = collector(method="T_cmb", kwargs={})
            elif k == "fsigma8":
                self.collectors["growth_factor_f"] = collector(
                    method="scale_independent_growth_factor_f",
                    args=[np.atleast_1d(v["redshifts"])],
                    arg_array=0)
                self.collectors["sigma8"] = collector(
                    method="sigma",
                    # Notice: Needs H0 for 1st arg (R), so added later
                    args=[None, np.atleast_1d(v["redshifts"])],
                    arg_array=1)
                if "H0" not in self.input_params:
                    self.derived_extra += ["H0"]
                self.extra_args["output"] += " mPk"
                self.extra_args["P_k_max_h/Mpc"] = (max(
                    1, self.extra_args.get("P_k_max_h/Mpc", 0)))
                self.add_z_for_matter_power(v["redshifts"])
            elif k == "h_of_z":
                self.collectors[k] = collector(
                    method="Hubble",
                    args=[np.atleast_1d(v["redshifts"])],
                    arg_array=0)
                self.H_units_conv_factor = {
                    "/Mpc": 1,
                    "km/s/Mpc": _c
                }[v["units"]]
            elif k == "angular_diameter_distance":
                self.collectors[k] = collector(
                    method="angular_distance",
                    args=[np.atleast_1d(v["redshifts"])],
                    arg_array=0)
            else:
                # Extra derived parameters
                if v is None:
                    self.derived_extra += [self.translate_param(k)]
                else:
                    self.log.error("Unknown required product: '%s:%s'.", k, v)
                    raise HandledException
        # Derived parameters (if some need some additional computations)
        if "sigma8" in self.output_params or arguments:
            self.extra_args["output"] += " mPk"
            self.extra_args["P_k_max_h/Mpc"] = (max(
                1, self.extra_args.get("P_k_max_h/Mpc", 0)))
        # Since the Cl collector needs lmax, update it now, in case it has increased
        # *after* declaring the Cl collector
        if "Cl" in self.collectors:
            self.collectors["Cl"].kwargs["lmax"] = self.extra_args[
                "l_max_scalars"]
        # Cleanup of products string
        self.extra_args["output"] = " ".join(
            set(self.extra_args["output"].split()))

    def add_z_for_matter_power(self, z):
        if not hasattr(self, "z_for_matter_power"):
            self.z_for_matter_power = np.empty((0))
        self.z_for_matter_power = np.flip(np.sort(
            np.unique(
                np.concatenate([self.z_for_matter_power,
                                np.atleast_1d(z)]))),
                                          axis=0)
        self.extra_args["z_pk"] = " ".join(
            ["%g" % zi for zi in self.z_for_matter_power])

    def translate_param(self, p):
        if self.use_planck_names:
            return self.planck_to_classy.get(p, p)
        return p

    def set(self, params_values_dict, i_state):
        # Store them, to use them later to identify the state
        self.states[i_state]["params"] = deepcopy(params_values_dict)
        # Prepare parameters to be passed: this-iteration + extra
        args = {
            self.translate_param(p): v
            for p, v in params_values_dict.items()
        }
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.struct_cleanup()
        self.classy.set(**args)

    def compute(self, derived=None, **params_values_dict):
        lasts = [self.states[i]["last"] for i in range(self.n_states)]
        try:
            # are the parameter values there already?
            i_state = next(i for i in range(self.n_states)
                           if self.states[i]["params"] == params_values_dict)
            # Get (pre-computed) derived parameters
            if derived == {}:
                derived.update(self.states[i_state]["derived"])
            self.log.debug("Re-using computed results (state %d)", i_state)
        except StopIteration:
            # update the (first) oldest one and compute
            i_state = lasts.index(min(lasts))
            self.log.debug("Computing (state %d)", i_state)
            # Set parameters
            self.set(params_values_dict, i_state)
            # Compute!
            try:
                self.classy.compute()
            # "Valid" failure of CLASS: parameters too extreme -> log and report
            except CosmoComputationError:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on.")
                return False
            # CLASS not correctly initialised, or input parameters not correct
            except CosmoSevereError:
                self.log.error(
                    "Serious error setting parameters or computing results. "
                    "The parameters passed were %r and %r. "
                    "See original CLASS's error traceback below.\n",
                    self.states[i_state]["params"], self.extra_args)
                raise  # No HandledException, so that CLASS traceback gets printed
            # Gather products
            for product, collector in self.collectors.items():
                # Special case: sigma8 needs H0, which cannot be known beforehand:
                if "sigma8" in self.collectors:
                    self.collectors["sigma8"].args[0] = 8 / self.classy.h()
                method = getattr(self.classy, collector.method)
                if self.collectors[product].arg_array is None:
                    self.states[i_state][product] = method(
                        *self.collectors[product].args,
                        **self.collectors[product].kwargs)
                else:
                    i_array = self.collectors[product].arg_array
                    self.states[i_state][product] = np.zeros(
                        len(self.collectors[product].args[i_array]))
                    for i, v in enumerate(
                            self.collectors[product].args[i_array]):
                        args = (
                            list(self.collectors[product].args[:i_array]) +
                            [v] +
                            list(self.collectors[product].args[i_array + 1:]))
                        self.states[i_state][product][i] = method(
                            *args, **self.collectors[product].kwargs)
            # Prepare derived parameters
            d, d_extra = self.get_derived_all(
                derived_requested=(derived == {}))
            if derived == {}:
                derived.update(d)
            self.states[i_state]["derived"] = odict(
                [[p, derived.get(p)] for p in self.output_params])
            # Prepare necessary extra derived parameters
            self.states[i_state]["derived_extra"] = deepcopy(d_extra)
        # make this one the current one by decreasing the antiquity of the rest
        for i in range(self.n_states):
            self.states[i]["last"] -= max(lasts)
        self.states[i_state]["last"] = 1
        return True

    def get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``compute`` method).

        To get a parameter *from a likelihood* use `get_param` instead.
        """
        list_requested_derived = self.output_params if derived_requested else []
        de_translated = {
            self.translate_param(p): p
            for p in list_requested_derived
        }
        requested_derived_with_extra = list(de_translated.keys()) + list(
            self.derived_extra)
        derived_aux = {}
        # Exceptions
        if "rs_drag" in requested_derived_with_extra:
            requested_derived_with_extra.remove("rs_drag")
            derived_aux["rs_drag"] = self.classy.rs_drag()
        derived_aux.update(
            self.classy.get_current_derived_parameters(
                requested_derived_with_extra))
        # Fill return dictionaries
        derived = {
            de_translated[p]: derived_aux[self.translate_param(p)]
            for p in de_translated
        }
        derived_extra = {p: derived_aux[p] for p in self.derived_extra}
        # No need for error control: classy.get_current_derived_parameters is passed
        # every derived parameter not excluded before, and causes an error if it finds
        # a parameter that it does not recognise
        return derived, derived_extra

    def get_param(self, p):
        """
        Interface function for likelihoods to get sampled and derived parameters.
        """
        current_state = self.current_state()
        for pool in ["params", "derived", "derived_extra"]:
            value = current_state[pool].get(self.translate_param(p), None)
            if value is not None:
                return value
        self.log.error("Parameter not known: '%s'", p)
        raise HandledException

    def get_cl(self, ell_factor=False):
        """
        Returns the power spectra in microK^2
        (unitless for lensing potential),
        using the *current* state.
        """
        current_state = self.current_state()
        # get C_l^XX from the cosmological code
        try:
            cl = deepcopy(current_state["Cl"])
        except KeyError:
            self.log.error(
                "No Cl's were computed. Are you sure that you have requested them?"
            )
            raise HandledException
        ell_factor = ((cl["ell"] + 1) * cl["ell"] /
                      (2 * np.pi))[2:] if ell_factor else 1
        # convert dimensionless C_l's to C_l in muK**2
        T = current_state["TCMB"]
        for key in cl:
            # All quantities need to be multiplied by this factor, except the
            # phi-phi term, that is already dimensionless
            if key not in ['pp', 'ell']:
                cl[key][2:] *= (T * 1.e6)**2 * ell_factor
        if "pp" in cl and ell_factor is not 1:
            cl['pp'][2:] *= ell_factor**2 * (2 * np.pi)
        return cl

    def get_fsigma8(self, z):
        indices = np.where(self.z_for_matter_power == z)
        return (self.current_state()["growth_factor_f"][indices] *
                self.current_state()["sigma8"][indices])

    def get_h_of_z(self, z):
        return self.current_state()["h_of_z"][np.where(
            self.collectors["h_of_z"].args[self.collectors["h_of_z"].arg_array]
            == z)] * self.H_units_conv_factor

    def get_angular_diameter_distance(self, z):
        return self.current_state()["angular_diameter_distance"][np.where(
            self.collectors["angular_diameter_distance"].args[
                self.collectors["angular_diameter_distance"].arg_array] == z)]
Example #5
class BAOPowerModel(object):
    # Set redshifts
    z = [0.6, 0.8, 1., 1.2, 1.4, 1.6, 1.8, 2.]

    # other input parameters
    data_directory = "/Users/gerrit/SynologyDrive/Cambridge/H0_project/data/EUCLID_mock_spectra/"

    theory_pk_file = [
        "LCDM_spectra/euclid_mock_lcdm_z1.dat",
        "LCDM_spectra/euclid_mock_lcdm_z2.dat",
        "LCDM_spectra/euclid_mock_lcdm_z3.dat",
        "LCDM_spectra/euclid_mock_lcdm_z4.dat",
        "LCDM_spectra/euclid_mock_lcdm_z5.dat",
        "LCDM_spectra/euclid_mock_lcdm_z6.dat",
        "LCDM_spectra/euclid_mock_lcdm_z7.dat",
        "LCDM_spectra/euclid_mock_lcdm_z8.dat"
    ]

    theory_pk_file_nw = [
        "LCDM_nw_spectra_new/euclid_mock_lcdm_z1.dat",
        "LCDM_nw_spectra_new/euclid_mock_lcdm_z2.dat",
        "LCDM_nw_spectra_new/euclid_mock_lcdm_z3.dat",
        "LCDM_nw_spectra_new/euclid_mock_lcdm_z4.dat",
        "LCDM_nw_spectra_new/euclid_mock_lcdm_z5.dat",
        "LCDM_nw_spectra_new/euclid_mock_lcdm_z6.dat",
        "LCDM_nw_spectra_new/euclid_mock_lcdm_z7.dat",
        "LCDM_nw_spectra_new/euclid_mock_lcdm_z8.dat"
    ]

    cov_file = [
        "LCDM_covmats/euclid_mock_covmat_z1.dat",
        "LCDM_covmats/euclid_mock_covmat_z2.dat",
        "LCDM_covmats/euclid_mock_covmat_z3.dat",
        "LCDM_covmats/euclid_mock_covmat_z4.dat",
        "LCDM_covmats/euclid_mock_covmat_z5.dat",
        "LCDM_covmats/euclid_mock_covmat_z6.dat",
        "LCDM_covmats/euclid_mock_covmat_z7.dat",
        "LCDM_covmats/euclid_mock_covmat_z8.dat"
    ]

    cov_file_nw = [
        "LCDM_nw_covmats_new/euclid_mock_covmat_z1.dat",
        "LCDM_nw_covmats_new/euclid_mock_covmat_z2.dat",
        "LCDM_nw_covmats_new/euclid_mock_covmat_z3.dat",
        "LCDM_nw_covmats_new/euclid_mock_covmat_z4.dat",
        "LCDM_nw_covmats_new/euclid_mock_covmat_z5.dat",
        "LCDM_nw_covmats_new/euclid_mock_covmat_z6.dat",
        "LCDM_nw_covmats_new/euclid_mock_covmat_z7.dat",
        "LCDM_nw_covmats_new/euclid_mock_covmat_z8.dat"
    ]

    theory_fid_params = {
        'h': 0.6821,
        'omega_b': 0.02253,
        'omega_cdm': 0.1177,
        'A_s': 2.216e-9,
        'n_s': 0.9686,
        'tau_reio': 0.085,
        'm_ncdm': 0.06,
        'N_ncdm': 1,
        'N_ur': 2.0328
    }

    use_quadrupole = True
    use_hexadecapole = True

    Delta_k = 0.1

    def __init__(self, n_gauss=30, inflate_error=False):
        self.z = np.asarray(self.z)
        self.n_bin = np.shape(self.z)[0]
        self.inflate_error = inflate_error

        [self.gauss_mu, self.gauss_w] = p_roots(n_gauss)

        self.cosmo_fid = Class()
        self.cosmo_fid.set(self.theory_fid_params)
        self.cosmo_fid.compute()

        self.k_vals, self.Pk_theory_interp = self.interpolate_theory_spectrum(
            self.theory_pk_file)
        dump, self.Pk_theory_nw_interp = self.interpolate_theory_spectrum(
            self.theory_pk_file_nw)

        self.wiggle_only_interp = lambda k, mu: self.Pk_theory_interp(
            k, mu) - self.Pk_theory_nw_interp(k, mu)

        # Load in covariance matrices
        self.all_cov = np.zeros(
            (self.n_bin, (1 + self.use_quadrupole + self.use_hexadecapole) *
             len(self.k_vals),
             (1 + self.use_quadrupole + self.use_hexadecapole) *
             len(self.k_vals)))
        for index_z in range(self.n_bin):
            this_cov = np.loadtxt(
                os.path.join(self.data_directory, self.cov_file_nw[index_z]))

            if self.use_quadrupole and self.use_hexadecapole:
                if len(this_cov) == len(self.k_vals) * 3:
                    pass
                else:
                    raise Exception(
                        'Need correct size covariance for monopole+quadrupole+hexadecapole analysis'
                    )
            elif self.use_quadrupole:
                if len(this_cov) == len(self.k_vals) * 2:
                    pass
                elif len(this_cov) == len(self.k_vals) * 3:
                    this_cov = this_cov[:2 * len(self.k_vals), :2 *
                                        len(self.k_vals)]
                else:
                    raise Exception(
                        'Need correct size covariance for monopole+quadrupole analysis'
                    )
            else:
                if len(this_cov) == len(self.k_vals):
                    pass
                elif len(this_cov) == len(self.k_vals) * 2 or len(
                        this_cov) == len(self.k_vals) * 3:
                    this_cov = this_cov[:len(self.k_vals), :len(self.k_vals)]
                else:
                    raise Exception(
                        'Need correct size covariance for monopole-only analysis'
                    )

            self.all_cov[index_z] = this_cov

        self.full_cov = np.zeros(self.all_cov.shape)
        ## Compute theoretical error envelope and combine with usual covariance
        P0, P2, P4 = self.pk_model(self.k_vals, np.ones(self.z.shape),
                                   np.ones(self.z.shape))
        for index_z in range(self.n_bin):
            # Build the error envelope from the fiducial multipoles
            envelope_power0 = P0[index_z] * 2  # extra factor to avoid underestimating the error
            envelope_power2 = envelope_power0 * np.sqrt(5.)  # rescale by sqrt(2*ell+1)
            envelope_power4 = envelope_power0 * np.sqrt(9.)  # rescale by sqrt(2*ell+1)

            # Define model power
            if self.inflate_error:
                envelope_power0 *= 5.
                envelope_power2 *= 5.
                envelope_power4 *= 5.

            ## COMPUTE THEORETICAL ERROR COVARIANCE
            # Define coupling matrix
            k_mats = np.meshgrid(self.k_vals, self.k_vals)
            diff_k = k_mats[0] - k_mats[1]
            rho_submatrix = np.exp(-diff_k**2. / (2. * self.Delta_k**2.))

            if self.use_quadrupole and self.use_hexadecapole:
                # Assume uncorrelated multipoles here
                zero_matrix = np.zeros_like(rho_submatrix)
                rho_matrix = np.hstack([
                    np.vstack([rho_submatrix, zero_matrix, zero_matrix]),
                    np.vstack([zero_matrix, rho_submatrix, zero_matrix]),
                    np.vstack([zero_matrix, zero_matrix, rho_submatrix])
                ])
            elif self.use_quadrupole:
                zero_matrix = np.zeros_like(rho_submatrix)
                rho_matrix = np.hstack([
                    np.vstack([rho_submatrix, zero_matrix]),
                    np.vstack([zero_matrix, rho_submatrix])
                ])
            else:
                rho_matrix = rho_submatrix

            # Define error envelope from Baldauf'16

            E_vector0 = np.power(self.k_vals / 0.31, 1.8) * envelope_power0
            if self.use_quadrupole and self.use_hexadecapole:
                E_vector2 = np.power(self.k_vals / 0.31, 1.8) * envelope_power2
                E_vector4 = np.power(self.k_vals / 0.31, 1.8) * envelope_power4
                stacked_E = np.concatenate([E_vector0, E_vector2, E_vector4])
            elif self.use_quadrupole:
                E_vector2 = np.power(self.k_vals / 0.31, 1.8) * envelope_power2
                stacked_E = np.concatenate([E_vector0, E_vector2])
            else:
                stacked_E = E_vector0

            E_mat = np.diag(stacked_E)
            cov_theoretical_error = np.matmul(E_mat,
                                              np.matmul(rho_matrix, E_mat))

            # Combine theoretical-error and measurement covariances
            self.full_cov[index_z] = cov_theoretical_error + self.all_cov[index_z]

    def interpolate_theory_spectrum(self, files):
        # load theory power spectra
        for index_z in range(self.n_bin):
            data = np.loadtxt(os.path.join(self.data_directory,
                                           files[index_z]))

            if index_z == 0:
                k_vals_theory = data[:, 0]
                Pk0_theory = np.zeros((self.n_bin, len(data[:, 0])))
                Pk2_theory = np.zeros((self.n_bin, len(data[:, 0])))
                Pk4_theory = np.zeros((self.n_bin, len(data[:, 0])))

            Pk0_theory[index_z] = data[:, 1]
            if self.use_quadrupole:
                Pk2_theory[index_z] = data[:, 2]
            if self.use_hexadecapole:
                Pk4_theory[index_z] = data[:, 3]

        Pk0_theory_interp = interp1d(k_vals_theory,
                                     Pk0_theory,
                                     fill_value="extrapolate")
        if self.use_quadrupole and self.use_hexadecapole:
            Pk2_theory_interp = interp1d(k_vals_theory,
                                         Pk2_theory,
                                         fill_value="extrapolate")
            Pk4_theory_interp = interp1d(k_vals_theory,
                                         Pk4_theory,
                                         fill_value="extrapolate")

            Pk_theory_interp = lambda k, mu: Pk0_theory_interp(k) * legendre(
                0, mu) + Pk2_theory_interp(k) * legendre(
                    2, mu) + Pk4_theory_interp(k) * legendre(4, mu)
        elif self.use_quadrupole:
            Pk2_theory_interp = interp1d(k_vals_theory,
                                         Pk2_theory,
                                         fill_value="extrapolate")
            Pk_theory_interp = lambda k, mu: Pk0_theory_interp(k) * legendre(
                0, mu) + Pk2_theory_interp(k) * legendre(2, mu)
        else:
            Pk_theory_interp = lambda k, mu: Pk0_theory_interp(k) * legendre(
                0, mu)

        return k_vals_theory, Pk_theory_interp

    def pk_model(self, k_vals, alpha_perp, alpha_par, norm=1.0):
        ## k_vals must be in h/Mpc units
        ## Returns linear theory prediction for monopole, quadrupole and hexadecapole

        muGrid, kGrid = np.meshgrid(self.gauss_mu, k_vals)
        wGrid = np.einsum('i,j->ij', np.ones(k_vals.shape),
                          self.gauss_w)[np.newaxis, :, :]

        ## Compute AP-rescaled parameters
        F = alpha_par / alpha_perp

        k1 = (kGrid[np.newaxis, :, :] / alpha_perp[:, np.newaxis, np.newaxis] *
              np.sqrt(1. + np.power(muGrid[np.newaxis, :, :], 2.) *
                      (np.power(F[:, np.newaxis, np.newaxis], -2.) - 1.)))
        mu1 = (muGrid[np.newaxis, :, :] /
               (F[:, np.newaxis, np.newaxis] *
                np.sqrt(1. + np.power(muGrid[np.newaxis, :, :], 2.) *
                        (np.power(F[:, np.newaxis, np.newaxis], -2.) - 1.))))

        P_k_mu = self.Pk_theory_nw_interp(
            kGrid, muGrid) + self.wiggle_only_interp(k1, mu1).diagonal(
                axis1=0, axis2=1).transpose([2, 0, 1])

        # Use Gaussian quadrature for fast integral evaluation
        P0_est = np.sum(P_k_mu * wGrid, axis=-1) / 2.

        if self.use_quadrupole:
            P2_est = np.sum(
                P_k_mu * legendre(2, muGrid)[np.newaxis, :, :] * wGrid,
                axis=-1) * 5. / 2.
        else:
            P2_est = 0.

        if self.use_hexadecapole:
            P4_est = np.sum(
                P_k_mu * legendre(4, muGrid)[np.newaxis, :, :] * wGrid,
                axis=-1) * 9. / 2.
        else:
            P4_est = 0.

        return norm * P0_est, norm * P2_est, norm * P4_est

    def get_AP_params(self, cosmo):
        alpha_par = self.cosmo_fid.z_of_r(
            self.z)[1] * self.cosmo_fid.rs_drag() / (cosmo.z_of_r(self.z)[1] *
                                                     cosmo.rs_drag())
        alpha_perp = cosmo.z_of_r(self.z)[0] * self.cosmo_fid.rs_drag() / (
            self.cosmo_fid.z_of_r(self.z)[0] * cosmo.rs_drag())

        return alpha_par, alpha_perp

    def eval(self, k_vals, cosmo, norm=1.0):
        alpha_par, alpha_perp = self.get_AP_params(cosmo)
        # pk_model takes alpha_perp before alpha_par, so pass by keyword
        return self.pk_model(k_vals, alpha_perp=alpha_perp, alpha_par=alpha_par,
                             norm=norm)

    def eval_fid(self, k_vals, norm=1.0):
        alpha_par, alpha_perp = self.get_AP_params(self.cosmo_fid)
        return self.pk_model(k_vals, alpha_perp=alpha_perp, alpha_par=alpha_par,
                             norm=norm)

    def get_covariance(self):
        return self.full_cov
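
The core numerical step in pk_model is a Gauss-Legendre projection of P(k, mu) onto Legendre multipoles, P_ell(k) = (2*ell + 1)/2 * integral of P(k, mu) * L_ell(mu) over mu in [-1, 1]. A self-contained sketch of that step, using scipy's roots_legendre and eval_legendre instead of the module's own legendre(n, mu) helper (which the excerpt does not show); the toy P(k, mu) is purely illustrative:

import numpy as np
from scipy.special import roots_legendre, eval_legendre

def multipole(P_k_mu, k_vals, ell, n_gauss=30):
    # Gauss-Legendre quadrature over mu in [-1, 1]
    mu, w = roots_legendre(n_gauss)
    P = P_k_mu(k_vals[:, None], mu[None, :])     # shape (n_k, n_gauss)
    return (2 * ell + 1) / 2. * np.sum(P * eval_legendre(ell, mu) * w, axis=-1)

# toy Kaiser-like spectrum, just to exercise the quadrature
Pk = lambda k, mu: (1. + 0.5 * mu ** 2) ** 2 * np.exp(-k)
k = np.linspace(0.01, 0.3, 50)
P0, P2, P4 = (multipole(Pk, k, ell) for ell in (0, 2, 4))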
Example #6
class classy(BoltzmannBase):
    # Name of the Class repo/folder and version to download
    _classy_repo_name = "lesgourg/class_public"
    _min_classy_version = "v2.9.3"
    _classy_repo_version = os.environ.get('CLASSY_REPO_VERSION', _min_classy_version)

    def initialize(self):
        """Importing CLASS from the correct path, if given, and if not, globally."""
        # Allow global import if no direct path specification
        allow_global = not self.path
        if not self.path and self.packages_path:
            self.path = self.get_path(self.packages_path)
        self.classy_module = self.is_installed(path=self.path, allow_global=allow_global)
        if not self.classy_module:
            raise NotInstalledError(
                self.log, "Could not find CLASS. Check error message above.")
        from classy import Class, CosmoSevereError, CosmoComputationError
        global CosmoComputationError, CosmoSevereError
        self.classy = Class()
        super().initialize()
        # Add general CLASS stuff
        self.extra_args["output"] = self.extra_args.get("output", "")
        if "sBBN file" in self.extra_args:
            self.extra_args["sBBN file"] = (
                self.extra_args["sBBN file"].format(classy=self.path))
        # Derived parameters that may not have been requested, but will be necessary later
        self.derived_extra = []
        self.log.info("Initialized!")

    def must_provide(self, **requirements):
        # Computed quantities required by the likelihood
        super().must_provide(**requirements)
        for k, v in self._must_provide.items():
            # Products and other computations
            if k == "Cl":
                if any(("t" in cl.lower()) for cl in v):
                    self.extra_args["output"] += " tCl"
                if any((("e" in cl.lower()) or ("b" in cl.lower())) for cl in v):
                    self.extra_args["output"] += " pCl"
                # For modern experiments, always lensed Cl's!
                self.extra_args["output"] += " lCl"
                self.extra_args["lensing"] = "yes"
                # For l_max_scalars, remember previous entries.
                self.extra_args["l_max_scalars"] = max(v.values())
                self.collectors[k] = Collector(
                    method="lensed_cl", kwargs={"lmax": self.extra_args["l_max_scalars"]})
                if 'T_cmb' not in self.derived_extra:
                    self.derived_extra += ['T_cmb']
            elif k == "Hubble":
                self.collectors[k] = Collector(
                    method="Hubble",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k == "angular_diameter_distance":
                self.collectors[k] = Collector(
                    method="angular_distance",
                    args=[np.atleast_1d(v["z"])],
                    args_names=["z"],
                    arg_array=0)
            elif k == "comoving_radial_distance":
                self.collectors[k] = Collector(
                    method="z_of_r",
                    args_names=["z"],
                    args=[np.atleast_1d(v["z"])])
            elif isinstance(k, tuple) and k[0] == "Pk_grid":
                self.extra_args["output"] += " mPk"
                v = deepcopy(v)
                self.add_P_k_max(v.pop("k_max"), units="1/Mpc")
                # NB: Actually, only the max z is used, and the actual sampling in z
                # for computing P(k,z) is controlled by `perturb_sampling_stepsize`
                # (default: 0.1). But let's leave it like this in case this changes
                # in the future.
                self.add_z_for_matter_power(v.pop("z"))

                if v["nonlinear"] and "non linear" not in self.extra_args:
                    self.extra_args["non linear"] = non_linear_default_code
                pair = k[2:]
                if pair == ("delta_tot", "delta_tot"):
                    v["only_clustering_species"] = False
                elif pair == ("delta_nonu", "delta_nonu"):
                    v["only_clustering_species"] = True
                else:
                    raise LoggedError(self.log, "NotImplemented in CLASS: %r", pair)
                self.collectors[k] = Collector(
                    method="get_pk_and_k_and_z",
                    kwargs=v,
                    post=(lambda P, kk, z: (kk, z, np.array(P).T)))
            elif isinstance(k, tuple) and k[0] == "sigma_R":
                raise LoggedError(
                    self.log, "Classy sigma_R not implemented as yet - use CAMB only")
            elif v is None:
                k_translated = self.translate_param(k)
                if k_translated not in self.derived_extra:
                    self.derived_extra += [k_translated]
            else:
                raise LoggedError(self.log, "Requested product not known: %r", {k: v})
        # Derived parameters (if some need some additional computations)
        if any(("sigma8" in s) for s in self.output_params or requirements):
            self.extra_args["output"] += " mPk"
            self.add_P_k_max(1, units="1/Mpc")
        # Adding tensor modes if requested
        if self.extra_args.get("r") or "r" in self.input_params:
            self.extra_args["modes"] = "s,t"
        # If B spectrum with l>50, or lensing, recommend using Halofit
        cls = self._must_provide.get("Cl", {})
        has_BB_l_gt_50 = (any(("b" in cl.lower()) for cl in cls) and
                          max(cls[cl] for cl in cls if "b" in cl.lower()) > 50)
        has_lensing = any(("p" in cl.lower()) for cl in cls)
        if (has_BB_l_gt_50 or has_lensing) and not self.extra_args.get("non linear"):
            self.log.warning("Requesting BB for ell>50 or lensing Cl's: "
                             "using a non-linear code is recommended (and you are not "
                             "using any). To activate it, set "
                             "'non_linear: halofit|hmcode|...' in classy's 'extra_args'.")
        # Cleanup of products string
        self.extra_args["output"] = " ".join(set(self.extra_args["output"].split()))
        self.check_no_repeated_input_extra()

    def add_z_for_matter_power(self, z):
        if getattr(self, "z_for_matter_power", None) is None:
            self.z_for_matter_power = np.empty(0)
        self.z_for_matter_power = np.flip(np.sort(np.unique(np.concatenate(
            [self.z_for_matter_power, np.atleast_1d(z)]))), axis=0)
        self.extra_args["z_pk"] = " ".join(["%g" % zi for zi in self.z_for_matter_power])

    def add_P_k_max(self, k_max, units):
        r"""
        Unifies treatment of :math:`k_\mathrm{max}` for matter power spectrum:
        ``P_k_max_[1|h]/Mpc]``.

        Make ``units="1/Mpc"|"h/Mpc"``.
        """
        # Fiducial h for the unit conversion (set high on purpose: safe, though it may slow the computations)
        h_fid = 1
        if units == "h/Mpc":
            k_max *= h_fid
        # Take into account possible manual set of P_k_max_***h/Mpc*** through extra_args
        k_max_old = self.extra_args.pop(
            "P_k_max_1/Mpc", h_fid * self.extra_args.pop("P_k_max_h/Mpc", 0))
        self.extra_args["P_k_max_1/Mpc"] = max(k_max, k_max_old)

    def set(self, params_values_dict):
        # If no output requested, remove arguments that produce an error
        # (e.g. complaints if halofit requested but no Cl's computed.)
        # Needed for facilitating post-processing
        if not self.extra_args["output"]:
            for k in ["non linear"]:
                self.extra_args.pop(k, None)
        # Prepare parameters to be passed: this-iteration + extra
        args = {self.translate_param(p): v for p, v in params_values_dict.items()}
        args.update(self.extra_args)
        # Generate and save
        self.log.debug("Setting parameters: %r", args)
        self.classy.set(**args)

    def calculate(self, state, want_derived=True, **params_values_dict):
        # Set parameters
        self.set(params_values_dict)
        # Compute!
        try:
            self.classy.compute()
        # "Valid" failure of CLASS: parameters too extreme -> log and report
        except CosmoComputationError as e:
            if self.stop_at_error:
                self.log.error(
                    "Computation error (see traceback below)! "
                    "Parameters sent to CLASS: %r and %r.\n"
                    "To ignore this kind of error, make 'stop_at_error: False'.",
                    state["params"], dict(self.extra_args))
                raise
            else:
                self.log.debug("Computation of cosmological products failed. "
                               "Assigning 0 likelihood and going on. "
                               "The output of the CLASS error was %s" % e)
            return False
        # CLASS not correctly initialized, or input parameters not correct
        except CosmoSevereError:
            self.log.error("Serious error setting parameters or computing results. "
                           "The parameters passed were %r and %r. To see the original "
                           "CLASS' error traceback, make 'debug: True'.",
                           state["params"], self.extra_args)
            raise  # No LoggedError, so that CLASS traceback gets printed
        # Gather products
        for product, collector in self.collectors.items():
            # Special case: sigma8 needs H0, which cannot be known beforehand
            # (a toy version of this collector dispatch is sketched after this
            # example):
            if product == "sigma8":
                collector.args[0] = 8 / self.classy.h()
            method = getattr(self.classy, collector.method)
            arg_array = collector.arg_array
            if arg_array is None:
                state[product] = method(*collector.args, **collector.kwargs)
            elif isinstance(arg_array, int):
                # Vectorize over the positional argument at index arg_array
                state[product] = np.zeros(len(collector.args[arg_array]))
                for i, v in enumerate(collector.args[arg_array]):
                    args = (list(collector.args[:arg_array]) + [v] +
                            list(collector.args[arg_array + 1:]))
                    state[product][i] = method(*args, **collector.kwargs)
            elif arg_array in collector.kwargs:
                # Vectorize over the keyword argument named arg_array
                value = np.atleast_1d(collector.kwargs[arg_array])
                state[product] = np.zeros(value.shape)
                for i, v in enumerate(value):
                    kwargs = deepcopy(collector.kwargs)
                    kwargs[arg_array] = v
                    state[product][i] = method(*collector.args, **kwargs)
            if collector.post:
                state[product] = collector.post(*state[product])
        # Prepare derived parameters
        d, d_extra = self._get_derived_all(derived_requested=want_derived)
        if want_derived:
            state["derived"] = {p: d.get(p) for p in self.output_params}
        # Prepare necessary extra derived parameters
        state["derived_extra"] = deepcopy(d_extra)

    def _get_derived_all(self, derived_requested=True):
        """
        Returns a dictionary of derived parameters with their values,
        using the *current* state (i.e. it should only be called from
        the ``calculate`` method).

        Parameter names are returned in CLASS nomenclature.

        To get a parameter *from a likelihood*, use ``get_param`` instead.
        """
        # TODO: fails with derived_requested=False
        # Put all parameters in CLASS nomenclature (self.derived_extra already is)
        requested = [self.translate_param(p) for p in (
            self.output_params if derived_requested else [])]
        requested_and_extra = dict.fromkeys(set(requested).union(set(self.derived_extra)))
        # Parameters with their own getters
        if "rs_drag" in requested_and_extra:
            requested_and_extra["rs_drag"] = self.classy.rs_drag()
        if "Omega_nu" in requested_and_extra:
            requested_and_extra["Omega_nu"] = self.classy.Omega_nu
        if "T_cmb" in requested_and_extra:
            requested_and_extra["T_cmb"] = self.classy.T_cmb()
        # Get the rest using the general derived-parameter getter.
        # No need for error control: classy.get_current_derived_parameters is
        # passed every derived parameter not already set above, and it will
        # itself raise an error indicating which parameters are not recognized.
        requested_and_extra.update(
            self.classy.get_current_derived_parameters(
                [p for p, v in requested_and_extra.items() if v is None]))
        # Separate the parameters before returning
        # Remember: self.output_params is in sampler nomenclature,
        # but self.derived_extra is in CLASS
        derived = {
            p: requested_and_extra[self.translate_param(p)] for p in self.output_params}
        derived_extra = {p: requested_and_extra[p] for p in self.derived_extra}
        return derived, derived_extra

    def get_Cl(self, ell_factor=False, units="FIRASmuK2"):
        try:
            cls = deepcopy(self._current_state["Cl"])
        except KeyError:
            raise LoggedError(
                self.log,
                "No Cl's were computed. Are you sure that you have requested them?")
        # Unit conversion and ell factor, D_ell = ell(ell+1) C_ell / (2 pi)
        # (a standalone sketch of this scaling appears after this example)
        ells_factor = (((cls["ell"] + 1) * cls["ell"] / (2 * np.pi))[2:]
                       if ell_factor else 1)
        units_factor = self._cmb_unit_factor(
            units, self._current_state['derived_extra']['T_cmb'])

        for cl in cls:
            if cl not in ['pp', 'ell']:
                cls[cl][2:] *= units_factor ** 2 * ells_factor
        if "pp" in cls and ell_factor:
            cls['pp'][2:] *= ells_factor ** 2 * (2 * np.pi)
        return cls

    def _get_z_dependent(self, quantity, z):
        # Select precomputed values by exact redshift matching against the grid
        # the collector was evaluated on (a standalone sketch of this lookup
        # appears after this example)
        try:
            z_name = next(k for k in ["redshifts", "z"]
                          if k in self.collectors[quantity].kwargs)
            computed_redshifts = self.collectors[quantity].kwargs[z_name]
        except StopIteration:
            computed_redshifts = self.collectors[quantity].args[
                self.collectors[quantity].args_names.index("z")]
        i_kwarg_z = np.concatenate(
            [np.where(computed_redshifts == zi)[0] for zi in np.atleast_1d(z)])
        values = np.array(deepcopy(self._current_state[quantity]))
        if quantity == "comoving_radial_distance":
            values = values[0]
        return values[i_kwarg_z]

    def close(self):
        self.classy.empty()

    def get_can_provide_params(self):
        names = ['Omega_Lambda', 'Omega_cdm', 'Omega_b', 'Omega_m', 'rs_drag', 'z_reio',
                 'YHe', 'Omega_k', 'age', 'sigma8']
        for name, mapped in self.renames.items():
            if mapped in names:
                names.append(name)
        return names

    def get_version(self):
        return getattr(self.classy_module, '__version__', None)

    # Installation routines

    @classmethod
    def get_path(cls, path):
        return os.path.realpath(os.path.join(path, "code", cls.__name__))

    @classmethod
    def get_import_path(cls, path):
        log = logging.getLogger(cls.__name__)
        classy_build_path = os.path.join(path, "python", "build")
        if not os.path.isdir(classy_build_path):
            log.error("Either CLASS is not in the given folder, "
                      "'%s', or you have not compiled it.", path)
            return None
        py_version = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
        try:
            post = next(d for d in os.listdir(classy_build_path)
                        if (d.startswith("lib.") and py_version in d))
        except StopIteration:
            log.error("The CLASS installation at '%s' has not been compiled for the "
                      "current Python version.", path)
            return None
        return os.path.join(classy_build_path, post)

    @classmethod
    def is_compatible(cls):
        import platform
        return platform.system() != "Windows"

    @classmethod
    def is_installed(cls, **kwargs):
        log = logging.getLogger(cls.__name__)
        if not kwargs.get("code", True):
            return True
        path = kwargs["path"]
        if path is not None and path.lower() == "global":
            path = None
        if path and not kwargs.get("allow_global"):
            log.info("Importing *local* CLASS from '%s'.", path)
            if not os.path.exists(path):
                log.error("The given folder does not exist: '%s'", path)
                return False
            classy_build_path = cls.get_import_path(path)
            if not classy_build_path:
                return False
        elif not path:
            log.info("Importing *global* CLASS.")
            classy_build_path = None
        else:
            log.info("Importing *auto-installed* CLASS (but defaulting to *global*).")
            classy_build_path = cls.get_import_path(path)
        try:
            return load_module(
                'classy', path=classy_build_path, min_version=cls._classy_repo_version)
        except ImportError:
            if path is not None and path.lower() != "global":
                log.error("Couldn't find the CLASS python interface at '%s'. "
                          "Are you sure it has been installed there?", path)
            else:
                log.error("Could not import global CLASS installation. "
                          "Specify a Cobaya or CLASS installation path, "
                          "or install the CLASS Python interface globally with "
                          "'cd /path/to/class/python/ ; python setup.py install'")
            return False
        except VersionCheckError as e:
            log.error(str(e))
            return False

    @classmethod
    def install(cls, path=None, force=False, code=True, no_progress_bars=False, **kwargs):
        log = logging.getLogger(cls.__name__)
        if not code:
            log.info("Code not requested. Nothing to do.")
            return True
        log.info("Installing pre-requisites...")
        exit_status = pip_install("cython")
        if exit_status:
            log.error("Could not install pre-requisite: cython")
            return False
        log.info("Downloading classy...")
        success = download_github_release(
            os.path.join(path, "code"), cls._classy_repo_name, cls._classy_repo_version,
            repo_rename=cls.__name__, no_progress_bars=no_progress_bars, logger=log)
        if not success:
            log.error("Could not download classy.")
            return False
        classy_path = cls.get_path(path)
        log.info("Compiling classy...")
        from subprocess import Popen, PIPE
        env = deepcopy(os.environ)
        env.update({"PYTHON": sys.executable})
        process_make = Popen(["make"], cwd=classy_path, stdout=PIPE, stderr=PIPE, env=env)
        out, err = process_make.communicate()
        if process_make.returncode:
            log.info(out.decode(errors="replace"))
            log.info(err.decode(errors="replace"))
            log.error("Compilation failed!")
            return False
        return True
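
The wrapper above merges requirements incrementally: add_z_for_matter_power keeps a unique, descending redshift grid, while add_P_k_max only ever grows the requested k_max, normalizing h/Mpc requests to 1/Mpc. A minimal standalone sketch of the same merging logic (the function names here are illustrative, not part of the wrapper):

import numpy as np

def merge_z(z_old, z_new):
    # Union of requested redshifts, sorted descending (CLASS's z_pk convention)
    return np.flip(np.sort(np.unique(np.concatenate(
        [np.atleast_1d(z_old), np.atleast_1d(z_new)]))))

def merge_k_max(extra_args, k_max, h_fid=1.0, units="1/Mpc"):
    # Keep the largest k_max requested so far, normalizing h/Mpc to 1/Mpc
    if units == "h/Mpc":
        k_max *= h_fid
    k_max_old = extra_args.pop(
        "P_k_max_1/Mpc", h_fid * extra_args.pop("P_k_max_h/Mpc", 0))
    extra_args["P_k_max_1/Mpc"] = max(k_max, k_max_old)

assert list(merge_z([0.0, 1.0], 0.5)) == [1.0, 0.5, 0.0]
extra = {"P_k_max_h/Mpc": 0.5}
merge_k_max(extra, 1.0)  # the new 1/Mpc request wins over the old 0.5 h/Mpc one
assert extra == {"P_k_max_1/Mpc": 1.0}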
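
calculate above dispatches each requested product through a Collector record naming a bound classy method, optionally looping over one of its arguments. A toy version of that dispatch, with a hypothetical Collector namedtuple and a fake backend standing in for the real ones:

from collections import namedtuple

import numpy as np

# Hypothetical stand-in for the wrapper's Collector record
Collector = namedtuple("Collector", ["method", "args", "arg_array"])

class FakeClassy:
    def angular_distance(self, z):
        return 1000.0 * z / (1.0 + z)  # dummy D_A(z), Mpc

def gather(backend, collectors):
    state = {}
    for product, coll in collectors.items():
        method = getattr(backend, coll.method)
        if coll.arg_array is None:
            state[product] = method(*coll.args)
        else:  # vectorize over the positional argument at index arg_array
            grid = coll.args[coll.arg_array]
            out = np.empty(len(grid))
            for i, v in enumerate(grid):
                args = list(coll.args)
                args[coll.arg_array] = v
                out[i] = method(*args)
            state[product] = out
    return state

state = gather(FakeClassy(),
               {"angular_diameter_distance":
                Collector("angular_distance", ([0.5, 1.0, 2.0],), 0)})
print(state["angular_diameter_distance"])  # D_A evaluated on the z grid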
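
get_Cl rescales the raw spectra to D_ell = ell*(ell+1)*C_ell/(2*pi) in muK^2, and gives the (dimensionless) lensing potential 'pp' the squared ell factor times an extra 2*pi. The same arithmetic in isolation (a sketch; 2.7255e6 muK is the standard FIRAS temperature, assumed here):

import numpy as np

def scale_cls(cls, ell_factor=True, T_cmb_muK=2.7255e6):
    # cls: dict with 'ell' and raw dimensionless spectra ('tt', 'ee', ..., 'pp')
    ells = cls["ell"]
    ellf = (ells * (ells + 1) / (2 * np.pi))[2:] if ell_factor else 1.0
    for key in cls:
        if key not in ("ell", "pp"):
            cls[key][2:] *= T_cmb_muK ** 2 * ellf  # C_ell -> D_ell in muK^2
    if "pp" in cls and ell_factor:
        # phi-phi stays dimensionless: [ell(ell+1)]^2 C_ell^pp / (2 pi)
        cls["pp"][2:] *= ellf ** 2 * (2 * np.pi)
    return cls

raw = {"ell": np.arange(10), "tt": np.ones(10), "pp": np.ones(10)}
scaled = scale_cls(raw)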
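
_get_z_dependent picks precomputed values by exact redshift matching against the stored grid. The same lookup in isolation (a sketch; like the wrapper, it assumes every requested z was actually computed):

import numpy as np

computed_z = np.array([2.0, 1.0, 0.5, 0.0])              # descending, as stored
computed_vals = np.array([3500.0, 2300.0, 1300.0, 0.0])  # e.g. r(z) in Mpc (toy)

def z_lookup(z):
    idx = np.concatenate(
        [np.where(computed_z == zi)[0] for zi in np.atleast_1d(z)])
    return computed_vals[idx]

print(z_lookup([0.5, 2.0]))  # -> [1300. 3500.]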
Example #7
]

norm = 1.
# b1 = 1.909433
# b2 = -2.357092
# bG2 = 3.818261e-01
# css0 = -2.911944e+01
# css2 = -1.235181e+01
# Pshot = 2.032084e+03
# bGamma3 = 0.
# b4 = 1.924983e+02

# (a self-contained version of these calls appears at the end of this example)
print('S8=', cosmo.sigma8() * (cosmo.Omega_m() / 0.3)**0.5)

# 1/c with c in km/s: converts H(z) from km/s/Mpc to 1/Mpc
kmsMpc = 3.33564095198145e-6
rd = cosmo.rs_drag()  # comoving sound horizon at baryon drag, in Mpc
print('rd=', rd)

for j in range(len(chunk)):
    z = zs[j]
    b1 = b1ar[j]
    b2 = b2ar[j]
    bG2 = bG2ar[j]
    css0 = css0ar[j]
    css2 = css2ar[j]
    Pshot = Pshotar[j]
    bGamma3 = bGamma3ar[j]
    b4 = b4ar[j]
    fz = cosmo.scale_independent_growth_factor_f(z)
    da = cosmo.angular_distance(z)
    # print('DA=',da)
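
The snippet above is truncated, but the S8 and rs_drag printouts near its start are easy to reproduce standalone. A minimal sketch with an illustrative flat LCDM parameter set (the values are assumptions, not the snippet's):

from classy import Class

cosmo = Class()
cosmo.set({
    "output": "mPk",
    "P_k_max_1/Mpc": 1.0,
    "h": 0.6736, "omega_b": 0.02237, "omega_cdm": 0.1200,
    "A_s": 2.1e-9, "n_s": 0.9649, "tau_reio": 0.0544,
})
cosmo.compute()

# S8 = sigma8 * sqrt(Omega_m / 0.3), as in the print statement above
print("S8 =", cosmo.sigma8() * (cosmo.Omega_m() / 0.3) ** 0.5)
print("rd =", cosmo.rs_drag(), "Mpc")  # comoving sound horizon at baryon drag

cosmo.struct_cleanup()
cosmo.empty()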
Example #8
# Imports needed to run this snippet (assumed from context: MontePython's
# Likelihood base class, SciPy's quadrature and Legendre helpers, and classy):
import os

import numpy as np
from classy import Class
from montepython.likelihood_class import Likelihood
from scipy.interpolate import interp1d
from scipy.special import p_roots  # Gauss-Legendre nodes and weights
from scipy.special import eval_legendre as legendre  # legendre(n, x) = P_n(x)


class euclid_bao_only(Likelihood):
    """# Unreconstructed EUCLID multipole data
    data_directory = "/Users/gerrit/SynologyDrive/Cambridge/H0_project/data/"

    # Set redshifts
    z = [0.6, 0.8, 1., 1.2, 1.4, 1.6, 1.8, 2.]

    # other input parameters
    cov_file = ["EUCLID_mock_spectra/LCDM_covmats/euclid_mock_covmat_z1.dat", "EUCLID_mock_spectra/LCDM_covmats/euclid_mock_covmat_z2.dat", "EUCLID_mock_spectra/LCDM_covmats/euclid_mock_covmat_z3.dat", "EUCLID_mock_spectra/LCDM_covmats/euclid_mock_covmat_z4.dat", "EUCLID_mock_spectra/LCDM_covmats/euclid_mock_covmat_z5.dat", "EUCLID_mock_spectra/LCDM_covmats/euclid_mock_covmat_z6.dat", "EUCLID_mock_spectra/LCDM_covmats/euclid_mock_covmat_z7.dat",
                                "EUCLID_mock_spectra/LCDM_covmats/euclid_mock_covmat_z8.dat"]
    theory_pk_file = ["EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z1.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z2.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z3.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z4.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z5.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z6.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z7.dat",
                                      "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z8.dat"]
    theory_pk_file_nw = ["EUCLID_mock_spectra/LCDM_nw_spectra/euclid_mock_lcdm_z1.dat", "EUCLID_mock_spectra/LCDM_nw_spectra/euclid_mock_lcdm_z2.dat", "EUCLID_mock_spectra/LCDM_nw_spectra/euclid_mock_lcdm_z3.dat", "EUCLID_mock_spectra/LCDM_nw_spectra/euclid_mock_lcdm_z4.dat", "EUCLID_mock_spectra/LCDM_nw_spectra/euclid_mock_lcdm_z5.dat", "EUCLID_mock_spectra/LCDM_nw_spectra/euclid_mock_lcdm_z6.dat", "EUCLID_mock_spectra/LCDM_nw_spectra/euclid_mock_lcdm_z7.dat",
                                         "EUCLID_mock_spectra/LCDM_nw_spectra/euclid_mock_lcdm_z8.dat"]

    file = ["EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z1.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z2.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z3.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z4.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z5.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z6.dat", "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z7.dat",
                            "EUCLID_mock_spectra/LCDM_spectra/euclid_mock_lcdm_z8.dat"]

    use_quadrupole = True
    use_hexadecapole = True

    inflate_error = False

    Delta_k = 0.1

    use_nuisance = ['norm'] + ['b0_{}'.format(z_i + 1) for z_i in range(len(z))] + ['b2_{}'.format(z_i + 1) for z_i in range(len(z))] + ['b4_{}'.format(z_i + 1) for z_i in range(len(z))]"""
    def __init__(self, path, data, command_line):

        Likelihood.__init__(self, path, data, command_line)

        self.z = np.asarray(self.z)
        self.n_bin = np.shape(self.z)[0]

        self.cosmo_fid = Class()
        self.cosmo_fid.set({
            'h': 0.6821,
            'omega_b': 0.02253,
            'omega_cdm': 0.1177,
            'A_s': 2.216e-9,
            'n_s': 0.9686,
            'tau_reio': 0.085,
            'm_ncdm': 0.06,
            'N_ncdm': 1,
            'N_ur': 2.0328
        })
        self.cosmo_fid.compute()

        ## Define parameters for Gaussian quadrature
        n_gauss = 30  # number of Gaussian quadrature points
        [self.gauss_mu, self.gauss_w] = p_roots(n_gauss)

        self.k_vals = None
        self.Pk0_data = None
        self.Pk2_data = None
        self.Pk4_data = None

        for index_z in range(self.n_bin):
            data = np.loadtxt(
                os.path.join(self.data_directory, self.file[index_z]))
            # define input arrays
            if index_z == 0:
                self.k_vals = data[:, 0]
                self.Pk0_data = np.zeros((self.n_bin, len(data[:, 0])))
                self.Pk2_data = np.zeros((self.n_bin, len(data[:, 0])))
                self.Pk4_data = np.zeros((self.n_bin, len(data[:, 0])))

            self.Pk0_data[index_z] = data[:, 1]
            if self.use_quadrupole:
                self.Pk2_data[index_z] = data[:, 2]
            if self.use_hexadecapole:
                self.Pk4_data[index_z] = data[:, 3]

        self.k_vals, self.Pk_theory_interp = self.interpolate_theory_spectrum(
            self.theory_pk_file)
        _, self.Pk_theory_nw_interp = self.interpolate_theory_spectrum(
            self.theory_pk_file_nw)

        self.wiggle_only_interp = lambda k, mu: self.Pk_theory_interp(
            k, mu) - self.Pk_theory_nw_interp(k, mu)

        self.muGrid, self.kGrid = np.meshgrid(self.gauss_mu, self.k_vals)
        self.wGrid = np.einsum('i,j->ij', np.ones(self.k_vals.shape),
                               self.gauss_w)[np.newaxis, :, :]

        # Load in covariance matrices
        self.all_cov = np.zeros(
            (self.n_bin, (1 + self.use_quadrupole + self.use_hexadecapole) *
             len(self.k_vals),
             (1 + self.use_quadrupole + self.use_hexadecapole) *
             len(self.k_vals)))
        for index_z in range(self.n_bin):
            this_cov = np.loadtxt(
                os.path.join(self.data_directory, self.cov_file[index_z]))

            if self.use_quadrupole and self.use_hexadecapole:
                if len(this_cov) == len(self.k_vals) * 3:
                    pass
                else:
                    raise Exception(
                        'Need correct size covariance for monopole+quadrupole+hexadecapole analysis'
                    )
            elif self.use_quadrupole:
                if len(this_cov) == len(self.k_vals) * 2:
                    pass
                elif len(this_cov) == len(self.k_vals) * 3:
                    this_cov = this_cov[:2 * len(self.k_vals), :2 *
                                        len(self.k_vals)]
                else:
                    raise Exception(
                        'Need correct size covariance for monopole+quadrupole analysis'
                    )
            else:
                if len(this_cov) == len(self.k_vals):
                    pass
                elif len(this_cov) == len(self.k_vals) * 2 or len(
                        this_cov) == len(self.k_vals) * 3:
                    this_cov = this_cov[:len(self.k_vals), :len(self.k_vals)]
                else:
                    raise Exception(
                        'Need correct size covariance for monopole-only analysis'
                    )

            self.all_cov[index_z] = this_cov

        # Now define the multipole weights: Legendre polynomials on the mu grid
        self.leg2 = legendre(2, self.muGrid)  # = 0.5 * (3 mu^2 - 1)
        self.leg2 = self.leg2[np.newaxis, :, :]
        self.leg4 = legendre(4, self.muGrid)  # = 0.125 * (35 mu^4 - 30 mu^2 + 3)
        self.leg4 = self.leg4[np.newaxis, :, :]

        self.full_invcov = np.zeros(self.all_cov.shape)
        ## Compute theoretical error envelope and combine with usual covariance
        P0, P2, P4 = self.pk_model(np.ones(self.z.shape),
                                   np.ones(self.z.shape))
        for index_z in range(self.n_bin):

            # Error envelope built from the fiducial multipoles computed above:
            # an extra factor of 2 ensures we don't underestimate the error,
            # and the higher multipoles are rescaled by sqrt(2*ell+1)
            envelope_power0 = P0[index_z] * 2
            envelope_power2 = envelope_power0 * np.sqrt(5.)
            envelope_power4 = envelope_power0 * np.sqrt(9.)

            # Optionally inflate the error envelope further
            if self.inflate_error:
                envelope_power0 *= 5.
                envelope_power2 *= 5.
                envelope_power4 *= 5.

            ## COMPUTE THEORETICAL ERROR COVARIANCE
            ## (a standalone monopole-only sketch appears at the end of this example)
            # Define coupling matrix
            k_mats = np.meshgrid(self.k_vals, self.k_vals)
            diff_k = k_mats[0] - k_mats[1]
            rho_submatrix = np.exp(-diff_k**2. / (2. * self.Delta_k**2.))

            if self.use_quadrupole and self.use_hexadecapole:
                # Assume uncorrelated multipoles here
                zero_matrix = np.zeros_like(rho_submatrix)
                rho_matrix = np.hstack([
                    np.vstack([rho_submatrix, zero_matrix, zero_matrix]),
                    np.vstack([zero_matrix, rho_submatrix, zero_matrix]),
                    np.vstack([zero_matrix, zero_matrix, rho_submatrix])
                ])
            elif self.use_quadrupole:
                zero_matrix = np.zeros_like(rho_submatrix)
                rho_matrix = np.hstack([
                    np.vstack([rho_submatrix, zero_matrix]),
                    np.vstack([zero_matrix, rho_submatrix])
                ])
            else:
                rho_matrix = rho_submatrix

            # Define error envelope from Baldauf'16

            E_vector0 = np.power(self.k_vals / 0.31, 1.8) * envelope_power0
            if self.use_quadrupole and self.use_hexadecapole:
                E_vector2 = np.power(self.k_vals / 0.31, 1.8) * envelope_power2
                E_vector4 = np.power(self.k_vals / 0.31, 1.8) * envelope_power4
                stacked_E = np.concatenate([E_vector0, E_vector2, E_vector4])
            elif self.use_quadrupole:
                E_vector2 = np.power(self.k_vals / 0.31, 1.8) * envelope_power2
                stacked_E = np.concatenate([E_vector0, E_vector2])
            else:
                stacked_E = E_vector0

            E_mat = np.diag(stacked_E)
            cov_theoretical_error = np.matmul(E_mat,
                                              np.matmul(rho_matrix, E_mat))

            self.full_invcov[index_z] = np.linalg.inv(cov_theoretical_error +
                                                      self.all_cov[index_z])

    def interpolate_theory_spectrum(self, files):
        # load theory power spectra
        for index_z in range(self.n_bin):
            data = np.loadtxt(os.path.join(self.data_directory,
                                           files[index_z]))

            if index_z == 0:
                k_vals_theory = data[:, 0]
                Pk0_theory = np.zeros((self.n_bin, len(data[:, 0])))
                Pk2_theory = np.zeros((self.n_bin, len(data[:, 0])))
                Pk4_theory = np.zeros((self.n_bin, len(data[:, 0])))

            Pk0_theory[index_z] = data[:, 1]
            if self.use_quadrupole:
                Pk2_theory[index_z] = data[:, 2]
            if self.use_hexadecapole:
                Pk4_theory[index_z] = data[:, 3]

        Pk0_theory_interp = interp1d(k_vals_theory,
                                     Pk0_theory,
                                     fill_value="extrapolate")
        if self.use_quadrupole and self.use_hexadecapole:
            Pk2_theory_interp = interp1d(k_vals_theory,
                                         Pk2_theory,
                                         fill_value="extrapolate")
            Pk4_theory_interp = interp1d(k_vals_theory,
                                         Pk4_theory,
                                         fill_value="extrapolate")

            Pk_theory_interp = lambda k, mu: Pk0_theory_interp(k) * legendre(
                0, mu) + Pk2_theory_interp(k) * legendre(
                    2, mu) + Pk4_theory_interp(k) * legendre(4, mu)
        elif self.use_quadrupole:
            Pk2_theory_interp = interp1d(k_vals_theory,
                                         Pk2_theory,
                                         fill_value="extrapolate")
            Pk_theory_interp = lambda k, mu: Pk0_theory_interp(k) * legendre(
                0, mu) + Pk2_theory_interp(k) * legendre(2, mu)
        else:
            Pk_theory_interp = lambda k, mu: Pk0_theory_interp(k) * legendre(
                0, mu)

        return k_vals_theory, Pk_theory_interp

    def pk_model(self, alpha_perp, alpha_par, norm=1.0):
        ## k must be in h/Mpc units
        ## Returns the linear-theory prediction for the monopole, quadrupole
        ## and hexadecapole (a standalone sketch of the AP rescaling and the
        ## Gauss-Legendre projection appears at the end of this example)

        ## Compute AP-rescaled wavenumber and angle
        F = alpha_par / alpha_perp

        k1 = (self.kGrid[np.newaxis, :, :] / alpha_perp[:, np.newaxis, np.newaxis]
              * np.sqrt(1. + np.power(self.muGrid[np.newaxis, :, :], 2.)
                        * (np.power(F[:, np.newaxis, np.newaxis], -2.) - 1.)))
        mu1 = (self.muGrid[np.newaxis, :, :]
               / (F[:, np.newaxis, np.newaxis]
                  * np.sqrt(1. + np.power(self.muGrid[np.newaxis, :, :], 2.)
                            * (np.power(F[:, np.newaxis, np.newaxis], -2.) - 1.))))

        # Wiggle part is AP-shifted; the broadband (no-wiggle) part is not
        P_k_mu = (self.Pk_theory_nw_interp(self.kGrid, self.muGrid)
                  + self.wiggle_only_interp(k1, mu1)
                    .diagonal(axis1=0, axis2=1).transpose([2, 0, 1]))

        # Use Gaussian quadrature for fast integral evaluation
        P0_est = np.sum(P_k_mu * self.wGrid, axis=-1) / 2.

        if self.use_quadrupole:
            P2_est = np.sum(P_k_mu * self.leg2 * self.wGrid, axis=-1) * 5. / 2.
        else:
            P2_est = 0.

        if self.use_hexadecapole:
            P4_est = np.sum(P_k_mu * self.leg4 * self.wGrid, axis=-1) * 9. / 2.
        else:
            P4_est = 0.

        return norm * P0_est, norm * P2_est, norm * P4_est

    def loglkl(self, cosmo, data):
        """Compute the log-likelihood.

        Parameters
        ----------
        cosmo : classy.Class
            The current CLASS cosmology.
        data : montepython data object
            Carries the current nuisance-parameter values.
        """
        chi2 = 0.0

        norm = (data.mcmc_parameters['norm']['current']
                * data.mcmc_parameters['norm']['scale'])
        alpha_rs = (data.mcmc_parameters['alpha_rs']['current']
                    * data.mcmc_parameters['alpha_rs']['scale'])

        # AP parameters: z_of_r returns (comoving distance r(z), dz/dr = H(z)),
        # so alpha_par rescales modes along the line of sight by
        # H_fid r_d_fid / (H r_d), and alpha_perp across it by
        # r(z) r_d_fid / (r_fid(z) r_d), with alpha_rs rescaling the sound horizon
        alpha_par = (self.cosmo_fid.z_of_r(self.z)[1] * self.cosmo_fid.rs_drag()
                     / (cosmo.z_of_r(self.z)[1] * alpha_rs * cosmo.rs_drag()))
        alpha_perp = (cosmo.z_of_r(self.z)[0] * self.cosmo_fid.rs_drag()
                      / (self.cosmo_fid.z_of_r(self.z)[0] * alpha_rs * cosmo.rs_drag()))

        ## Compute power spectrum multipoles
        P0_predictions, P2_predictions, P4_predictions = self.pk_model(
            alpha_perp, alpha_par, norm=norm)
        #return P0_predictions, P2_predictions, P4_predictions

        # Compute chi2 for each z-mean
        for index_z in range(self.n_bin):
            bias0 = (data.mcmc_parameters['b0_{}'.format(index_z + 1)]['current']
                     * data.mcmc_parameters['b0_{}'.format(index_z + 1)]['scale'])
            bias2 = (data.mcmc_parameters['b2_{}'.format(index_z + 1)]['current']
                     * data.mcmc_parameters['b2_{}'.format(index_z + 1)]['scale'])
            bias4 = (data.mcmc_parameters['b4_{}'.format(index_z + 1)]['current']
                     * data.mcmc_parameters['b4_{}'.format(index_z + 1)]['scale'])

            # Create vector of residual pk
            if self.use_quadrupole and self.use_hexadecapole:
                stacked_model = np.concatenate([
                    bias0 * P0_predictions[index_z],
                    bias2 * P2_predictions[index_z],
                    bias4 * P4_predictions[index_z]
                ])
                stacked_data = np.concatenate([
                    self.Pk0_data[index_z], self.Pk2_data[index_z],
                    self.Pk4_data[index_z]
                ])
            elif self.use_quadrupole:
                stacked_model = np.concatenate([
                    bias0 * P0_predictions[index_z],
                    bias2 * P2_predictions[index_z]
                ])
                stacked_data = np.concatenate(
                    [self.Pk0_data[index_z], self.Pk2_data[index_z]])
            else:
                stacked_model = bias0 * P0_predictions[index_z]
                stacked_data = self.Pk0_data[index_z]
            resid_vec = stacked_data - stacked_model

            # NB: for repeated evaluations one should use a Cholesky
            # decomposition with triangular solves instead of the precomputed
            # explicit inverse (a sketch appears at the end of this example)
            mb = 0  # minimum bin
            chi2 += float(
                np.matmul(
                    resid_vec[mb:].T,
                    np.matmul(self.full_invcov[index_z, mb:, mb:],
                              resid_vec[mb:])))

        if self.use_alpha_rs_prior:
            chi2 += (alpha_rs - 1.0)**2. / self.alpha_rs_prior**2.

        lkl = -0.5 * chi2
        return lkl
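
pk_model above first applies the Alcock-Paczynski rescaling k -> (k/alpha_perp)*sqrt(1 + mu^2*(F^-2 - 1)) and mu -> mu/(F*sqrt(1 + mu^2*(F^-2 - 1))) with F = alpha_par/alpha_perp, then projects P(k, mu) onto multipoles by Gauss-Legendre quadrature, P_ell(k) = (2*ell+1)/2 * sum_i w_i P(k, mu_i) L_ell(mu_i). A scalar-alpha sketch of both steps (the Kaiser-like toy P(k, mu) is an assumption):

import numpy as np
from scipy.special import eval_legendre, p_roots

def pk_multipoles(P_kmu, k, alpha_perp, alpha_par, n_gauss=30):
    # AP-rescale, then project P(k, mu) onto ell = 0, 2, 4
    mu, w = p_roots(n_gauss)
    F = alpha_par / alpha_perp
    kk, mm = np.meshgrid(k, mu, indexing="ij")
    stretch = np.sqrt(1. + mm ** 2 * (F ** -2. - 1.))
    P = P_kmu(kk / alpha_perp * stretch, mm / (F * stretch))
    return [(2 * ell + 1) / 2. * np.sum(P * eval_legendre(ell, mm) * w, axis=-1)
            for ell in (0, 2, 4)]

# toy anisotropic spectrum, illustrative only
P_toy = lambda k, mu: (1. + 0.5 * mu ** 2) ** 2 * 1e4 * np.exp(-k / 0.1)
P0, P2, P4 = pk_multipoles(P_toy, np.linspace(0.01, 0.25, 50), 1.02, 0.98)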
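
The theoretical-error covariance assembled in __init__ is C_err = E * rho * E, with Gaussian bin coupling rho_ij = exp(-(k_i - k_j)^2 / (2 Delta_k^2)) and an envelope E(k) = (k/0.31)^1.8 times twice the fiducial power, following Baldauf et al. 2016. A monopole-only sketch with toy arrays:

import numpy as np

k = np.linspace(0.01, 0.25, 50)      # h/Mpc, toy grid
P0 = 1e4 * np.exp(-k / 0.1)          # toy fiducial monopole
Delta_k = 0.1

ki, kj = np.meshgrid(k, k)
rho = np.exp(-(ki - kj) ** 2 / (2. * Delta_k ** 2))  # coupling of nearby bins

E = (k / 0.31) ** 1.8 * (2. * P0)    # envelope, factor 2 as in __init__ above
cov_err = np.diag(E) @ rho @ np.diag(E)

data_cov = np.diag((0.05 * P0) ** 2)        # toy diagonal data covariance
invcov = np.linalg.inv(cov_err + data_cov)  # combined, as in __init__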
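
Finally, the NB in loglkl is worth acting on: for repeated chi^2 evaluations, a Cholesky factorization of the combined covariance with triangular solves is cheaper and numerically more stable than forming the explicit inverse. A generic NumPy/SciPy sketch of that replacement (not code from this likelihood):

import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 50))
cov = A @ A.T + 50. * np.eye(50)  # toy symmetric positive-definite covariance
resid = rng.standard_normal(50)   # toy residual vector

cf = cho_factor(cov, lower=True)  # factor once per covariance
chi2 = float(resid @ cho_solve(cf, resid))

# agrees with the explicit-inverse chi2 used in loglkl above
assert np.isclose(chi2, float(resid @ np.linalg.inv(cov) @ resid))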