def multiplicityfunction(self, sigma):
        """
        /* Multiplicity function - this is where the various fitting functions/analytic 
        theories are different.  The various places where I found these fitting functions
        are listed below.  */
        """
        
        nu = self.delta_c0 / sigma
        
        if self.fitting_function==1:
            # Press-Schechter (This form from Jenkins et al. 2001, MNRAS 321, 372-384, eqtn. 5)
            thismult = math.sqrt(2.0 / math.pi) * nu * math.exp(-0.5 * nu * nu)
        
        elif self.fitting_function==2:
            # Jenkins et al. 2001, MNRAS 321, 372-384, eqtn. 9
            thismult = 0.315 * math.exp(-1.0 * math.pow(abs(math.log(1.0 / sigma) + 0.61), 3.8))
        
        elif self.fitting_function==3:
            # Sheth-Tormen 1999, eqtn 10, using expression from Jenkins et al. 2001, eqtn. 7
            A = 0.3222
            a = 0.707
            p = 0.3
            thismult = A * math.sqrt(2.0 * a / math.pi) * \
                (1.0 + math.pow(1.0 / (nu * nu * a), p)) * \
                nu * math.exp(-0.5 * a * nu * nu)
        
        elif self.fitting_function==4:
            # LANL fitting function - Warren et al. 2005, astro-ph/0506395, eqtn. 5 
            A = 0.7234
            a = 1.625
            b = 0.2538
            c = 1.1982
            thismult = A * (math.pow(sigma, -1.0 * a) + b) * \
                math.exp(-1.0 * c / sigma / sigma)

        elif self.fitting_function==5:
            # Tinker et al. 2008, eqn 3, \Delta=300 # \Delta=200
            A = 0.2 #0.186
            a = 1.52 #1.47
            b = 2.25 #2.57
            c = 1.27 #1.19
            thismult = A * ( math.pow((sigma / b), -a) + 1) * \
                math.exp(-1 * c / sigma / sigma)
        
        else:
            mylog.error("Don't understand this.  Fitting function requested is %d\n",
            self.fitting_function)
            return None
        
        return thismult
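For reference, a minimal standalone sketch (not part of the class above) of how the Press-Schechter and Sheth-Tormen branches evaluate for a given sigma; delta_c0 = 1.686 is an assumed fiducial collapse threshold, not the value the class itself computes.

import math

def press_schechter_multiplicity(sigma, delta_c0=1.686):
    # f(sigma) = sqrt(2/pi) * nu * exp(-nu^2 / 2), with nu = delta_c0 / sigma
    nu = delta_c0 / sigma
    return math.sqrt(2.0 / math.pi) * nu * math.exp(-0.5 * nu * nu)

def sheth_tormen_multiplicity(sigma, delta_c0=1.686, A=0.3222, a=0.707, p=0.3):
    # Sheth-Tormen form, matching the fitting_function == 3 branch above
    nu = delta_c0 / sigma
    return (A * math.sqrt(2.0 * a / math.pi)
            * (1.0 + (1.0 / (a * nu * nu)) ** p)
            * nu * math.exp(-0.5 * a * nu * nu))

print(press_schechter_multiplicity(0.9), sheth_tormen_multiplicity(0.9))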
Example #2
    def __init__(self, omega_matter, omega_baryon, omega_hdm, degen_hdm,
                 omega_lambda, hubble, redshift):
        self.qwarn = 0
        self.theta_cmb = 2.728 / 2.7  # Assuming T_cmb = 2.728 K

        # Look for strange input
        if (omega_baryon < 0.0):
            mylog.error(
                "TFmdm_set_cosm(): Negative omega_baryon set to trace amount.\n"
            )
            self.qwarn = 1
        if (omega_hdm < 0.0):
            mylog.error(
                "TFmdm_set_cosm(): Negative omega_hdm set to trace amount.\n")
            self.qwarn = 1
        if (hubble <= 0.0):
            mylog.error(
                "TFmdm_set_cosm(): Negative Hubble constant illegal.\n")
            return None
        elif (hubble > 2.0):
            mylog.error(
                "TFmdm_set_cosm(): Hubble constant should be in units of 100 km/s/Mpc.\n"
            )
            self.qwarn = 1
        if (redshift <= -1.0):
            mylog.error("TFmdm_set_cosm(): Redshift < -1 is illegal.\n")
            return None
        elif (redshift > 99.0):
            mylog.error(
                "TFmdm_set_cosm(): Large redshift entered.  TF may be inaccurate.\n"
            )
            self.qwarn = 1

        if (degen_hdm < 1): degen_hdm = 1
        self.num_degen_hdm = degen_hdm
        # Have to save this for TFmdm_onek_mpc()
        # This routine would crash if baryons or neutrinos were zero,
        # so don't allow that.
        if (omega_baryon <= 0): omega_baryon = 1e-5
        if (omega_hdm <= 0): omega_hdm = 1e-5

        self.omega_curv = 1.0 - omega_matter - omega_lambda
        self.omhh = omega_matter * SQR(hubble)
        self.obhh = omega_baryon * SQR(hubble)
        self.onhh = omega_hdm * SQR(hubble)
        self.f_baryon = omega_baryon / omega_matter
        self.f_hdm = omega_hdm / omega_matter
        self.f_cdm = 1.0 - self.f_baryon - self.f_hdm
        self.f_cb = self.f_cdm + self.f_baryon
        self.f_bnu = self.f_baryon + self.f_hdm

        # Compute the equality scale.
        self.z_equality = 25000.0 * self.omhh / SQR(SQR(
            self.theta_cmb))  # Actually 1+z_eq
        self.k_equality = 0.0746 * self.omhh / SQR(self.theta_cmb)

        # Compute the drag epoch and sound horizon
        z_drag_b1 = 0.313 * math.pow(
            self.omhh, -0.419) * (1 + 0.607 * math.pow(self.omhh, 0.674))
        z_drag_b2 = 0.238 * math.pow(self.omhh, 0.223)
        self.z_drag = 1291*math.pow(self.omhh,0.251)/(1.0+0.659*math.pow(self.omhh,0.828))* \
            (1.0+z_drag_b1*math.pow(self.obhh,z_drag_b2))
        self.y_drag = self.z_equality / (1.0 + self.z_drag)

        self.sound_horizon_fit = 44.5 * math.log(
            9.83 / self.omhh) / math.sqrt(1.0 +
                                          10.0 * math.pow(self.obhh, 0.75))

        # Set up for the free-streaming & infall growth function
        self.p_c = 0.25 * (5.0 - math.sqrt(1 + 24.0 * self.f_cdm))
        self.p_cb = 0.25 * (5.0 - math.sqrt(1 + 24.0 * self.f_cb))

        omega_denom = omega_lambda+SQR(1.0+redshift)*(self.omega_curv+\
                omega_matter*(1.0+redshift))
        self.omega_lambda_z = omega_lambda / omega_denom
        self.omega_matter_z = omega_matter * SQR(1.0 + redshift) * (
            1.0 + redshift) / omega_denom
        self.growth_k0 = self.z_equality/(1.0+redshift)*2.5*self.omega_matter_z/ \
            (math.pow(self.omega_matter_z,4.0/7.0)-self.omega_lambda_z+ \
            (1.0+self.omega_matter_z/2.0)*(1.0+self.omega_lambda_z/70.0))
        self.growth_to_z0 = self.z_equality*2.5*omega_matter/(math.pow(omega_matter,4.0/7.0) \
            -omega_lambda + (1.0+omega_matter/2.0)*(1.0+omega_lambda/70.0))
        self.growth_to_z0 = self.growth_k0 / self.growth_to_z0

        # Compute small-scale suppression
        self.alpha_nu = self.f_cdm / self.f_cb * (5.0 - 2.0 * (self.p_c + self.p_cb)) / (5.0 - 4.0 * self.p_cb) * \
            math.pow(1 + self.y_drag, self.p_cb - self.p_c) * \
            (1 + self.f_bnu * (-0.553 + 0.126 * self.f_bnu * self.f_bnu)) / \
            (1 - 0.193 * math.sqrt(self.f_hdm * self.num_degen_hdm) + 0.169 * self.f_hdm * math.pow(self.num_degen_hdm, 0.2)) * \
            (1 + (self.p_c - self.p_cb) / 2 * (1 + 1 / (3.0 - 4.0 * self.p_c) / (7.0 - 4.0 * self.p_cb)) / (1 + self.y_drag))
        self.alpha_gamma = math.sqrt(self.alpha_nu)
        self.beta_c = 1 / (1 - 0.949 * self.f_bnu)
        # Done setting scalar variables
        self.hhubble = hubble  # Need to pass Hubble constant to TFmdm_onek_hmpc()
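As a quick illustration, a minimal standalone sketch (with assumed fiducial parameters, independent of the class above) of the matter-radiation equality and sound-horizon fitting formulas set up in this constructor.

import math

omega_matter, omega_baryon, hubble = 0.3, 0.045, 0.7  # assumed fiducial values
theta_cmb = 2.728 / 2.7                                # T_cmb in units of 2.7 K
omhh = omega_matter * hubble ** 2
obhh = omega_baryon * hubble ** 2

z_equality = 25000.0 * omhh / theta_cmb ** 4           # actually 1 + z_eq
k_equality = 0.0746 * omhh / theta_cmb ** 2            # Mpc^-1
sound_horizon_fit = 44.5 * math.log(9.83 / omhh) / \
    math.sqrt(1.0 + 10.0 * math.pow(obhh, 0.75))
print(z_equality, k_equality, sound_horizon_fit)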
Example #3
    def sigmaM(self):
        """
         Written by BWO, 2006 (updated 25 January 2007).
         Converted to Python by Stephen Skory December 2009.

         This routine takes in cosmological parameters and creates arrays of
         sigma(M), which are needed for various Press-Schechter type
         calculations.  In principle one could compute sigma(M) on the fly, but
         it is far faster in the long run to tabulate it ahead of time.

         Inputs: cosmology; the user must set the parameters.

         Outputs: two arrays containing the following information:

         1) mass (Msolar/h)
         2) sigma (normalized) using Msun/h as the input

         The arrays output are used later.
        """

        # Set up the transfer function object.
        self.TF = TransferFunction(self.omega_matter0, self.omega_baryon0, 0.0,
                                   0, self.omega_lambda0, self.hubble0,
                                   self.this_redshift)

        if self.TF.qwarn:
            mylog.error("You should probably fix your cosmology parameters!")

        # output arrays
        # 1) mass (M_solar/h)
        self.masses_analytic = np.empty(self.num_sigma_bins, dtype='float64')
        # 2) sigma(M, z=0, where mass is in Msun/h)
        self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')

        # get sigma_8 normalization
        R = 8.0
        # in units of Mpc/h (comoving)

        sigma8_unnorm = math.sqrt(self.sigma_squared_of_R(R))
        sigma_normalization = self.sigma8 / sigma8_unnorm

        # rho0 in units of h^2 Msolar/Mpc^3
        rho0 = YTQuantity(
            self.omega_matter0 * rho_crit_g_cm3_h2 * self.hubble0**2,
            'g/cm**3').in_units('Msun/Mpc**3')
        rho0 = rho0.value.item()

        # spacing in mass of our sigma calculation
        dm = (float(self.log_mass_max) -
              self.log_mass_min) / self.num_sigma_bins
        """
         loop over the total number of sigma_bins the user has requested. 
         For each bin, calculate mass and equivalent radius, and call
         sigma_squared_of_R to get the sigma(R) (equivalent to sigma(M)),
         normalize by user-specified sigma_8, and then write out.
        """
        for i in range(self.num_sigma_bins):

            # thislogmass is in units of Msolar, NOT Msolar/h
            thislogmass = self.log_mass_min + i * dm

            # mass in units of h^-1 Msolar
            thismass = math.pow(10.0, thislogmass) * self.hubble0

            # radius is in units of h^-1 Mpc (comoving)
            thisradius = math.pow(3.0 * thismass / 4.0 / math.pi / rho0,
                                  1.0 / 3.0)

            R = thisradius
            # h^-1 Mpc (comoving)

            self.masses_analytic[i] = thismass
            # Msun/h

            # get normalized sigma(R)
            self.sigmaarray[i] = math.sqrt(
                self.sigma_squared_of_R(R)) * sigma_normalization
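A minimal sketch (with assumed numbers) of the mass-to-radius conversion used in the loop above: the comoving Lagrangian radius enclosing a mass M at mean matter density rho0 is R = (3 M / (4 pi rho0))**(1/3).

import math

rho0 = 8.3e10      # assumed mean matter density in h^2 Msun / Mpc^3
thismass = 1.0e14  # assumed mass in Msun / h
thisradius = math.pow(3.0 * thismass / 4.0 / math.pi / rho0, 1.0 / 3.0)
print(thisradius)  # comoving radius in Mpc / h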
Example #4
def load(*args, **kwargs):
    """
    This function attempts to determine the base data type of a filename or
    other set of arguments by calling
    :meth:`yt.data_objects.static_output.Dataset._is_valid` until it finds a
    match, at which point it returns an instance of the appropriate
    :class:`yt.data_objects.static_output.Dataset` subclass.
    """
    args = _sanitize_load_args(*args)
    candidates = []
    valid_file = []
    for argno, arg in enumerate(args):
        if isinstance(arg, str):
            if os.path.exists(arg):
                valid_file.append(True)
            elif arg.startswith("http"):
                valid_file.append(True)
            else:
                if os.path.exists(
                        os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
                    valid_file.append(True)
                    args[argno] = os.path.join(
                        ytcfg.get("yt", "test_data_dir"), arg)
                else:
                    valid_file.append(False)
        else:
            valid_file.append(False)
    types_to_check = output_type_registry
    if not any(valid_file):
        try:
            from yt.data_objects.time_series import DatasetSeries

            ts = DatasetSeries(*args, **kwargs)
            return ts
        except (TypeError, OSError, YTOutputNotIdentified):
            pass
        # We check if either the first argument is a dict or list, in which
        # case we try identifying candidates.
        if len(args) > 0 and isinstance(args[0], (list, dict)):
            # This fixes issues where it is assumed the first argument is a
            # file
            types_to_check = dict((n, v)
                                  for n, v in output_type_registry.items()
                                  if n.startswith("stream_"))
            # Better way to do this is to override the output_type_registry
        else:
            mylog.error(
                "None of the arguments provided to load() is a valid file")
            mylog.error("Please check that you have used a correct path")
            raise YTOutputNotIdentified(args, kwargs)
    for n, c in types_to_check.items():
        if n is None:
            continue
        if c._is_valid(*args, **kwargs):
            candidates.append(n)

    # convert to classes
    candidates = [output_type_registry[c] for c in candidates]
    # Find only the lowest subclasses, i.e. most specialised front ends
    candidates = find_lowest_subclasses(candidates)
    if len(candidates) == 1:
        return candidates[0](*args, **kwargs)
    if len(candidates) == 0:
        if (ytcfg.get("yt", "enzo_db") != "" and len(args) == 1
                and isinstance(args[0], str)):
            erdb = EnzoRunDatabase()
            fn = erdb.find_uuid(args[0])
            n = "EnzoDataset"
            if n in output_type_registry and output_type_registry[n]._is_valid(
                    fn):
                return output_type_registry[n](fn)
        mylog.error("Couldn't figure out output type for %s", args[0])
        raise YTOutputNotIdentified(args, kwargs)

    mylog.error("Multiple output type candidates for %s:", args[0])
    for c in candidates:
        mylog.error("    Possible: %s", c)
    raise YTOutputNotIdentified(args, kwargs)
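A minimal usage sketch of load(); the dataset path below is hypothetical and should be replaced with any output recognized by a yt frontend.

import yt

ds = yt.load("DD0010/data0010")  # hypothetical Enzo-style output path
print(ds.current_time, ds.domain_dimensions)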
Example #5
    def _detect_output_fields(self):
        field_map = {}
        f = open(self.index_filename, "rb")
        line = check_readline(f)
        chkwhile = chk23("")
        while line != chkwhile:
            splitup = line.strip().split()
            chkd = chk23("DIMENSIONS")
            chkc = chk23("CELL_DATA")
            chkp = chk23("POINT_DATA")
            if chkd in splitup:
                field = str23(splitup[-3:])
                grid_dims = np.array(field).astype("int")
                line = check_readline(f)
            elif chkc in splitup or chkp in splitup:
                grid_ncells = int(str23(splitup[-1]))
                line = check_readline(f)
                if np.prod(grid_dims) != grid_ncells:
                    grid_dims -= 1
                    grid_dims[grid_dims == 0] = 1
                if np.prod(grid_dims) != grid_ncells:
                    mylog.error(
                        "product of dimensions %i not equal to number of cells %i"
                        % (np.prod(grid_dims), grid_ncells)
                    )
                    raise TypeError
                break
            else:
                line = check_readline(f)
        read_table = False
        read_table_offset = f.tell()
        while line != chkwhile:
            splitup = line.strip().split()
            chks = chk23("SCALARS")
            chkv = chk23("VECTORS")
            if chks in line and chks not in splitup:
                splitup = str23(line[line.find(chks) :].strip().split())
            if chkv in line and chkv not in splitup:
                splitup = str23(line[line.find(chkv) :].strip().split())
            if chks in splitup:
                field = ("athena", str23(splitup[1]))
                dtype = str23(splitup[-1]).lower()
                if not read_table:
                    line = check_readline(f)  # Read the lookup table line
                    read_table = True
                field_map[field] = ("scalar", f.tell() - read_table_offset, dtype)
                read_table = False
            elif chkv in splitup:
                field = str23(splitup[1])
                dtype = str23(splitup[-1]).lower()
                for ax in "xyz":
                    field_map[("athena", "%s_%s" % (field, ax))] = (
                        "vector",
                        f.tell() - read_table_offset,
                        dtype,
                    )
            line = check_readline(f)

        f.close()

        self.field_list = list(field_map.keys())
        self._field_map = field_map
Example #6
    def off_axis(self,
                 L,
                 center="c",
                 width=(1, "unitary"),
                 nx=800,
                 source=None):
        r""" Make an off-axis projection of the SZ signal.

        Parameters
        ----------
        L : array_like
            The normal vector of the projection.
        center : array_like or string, optional
            The center of the projection.
        width : float or tuple
            The width of the projection.
        nx : integer, optional
            The dimensions on a side of the projection image.
        source : yt.data_objects.api.AMRData, optional
            If specified, this will be the data source used for selecting regions to project.
            Currently unsupported in yt 2.x.

        Examples
        --------
        >>> L = np.array([0.5, 1.0, 0.75])
        >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
        """
        if iterable(width):
            w = self.ds.quan(width[0], width[1]).in_units("code_length").value
        elif isinstance(width, YTQuantity):
            w = width.in_units("code_length").value
        else:
            w = width
        if center == "c":
            ctr = self.ds.domain_center
        elif center == "max":
            v, ctr = self.ds.find_max("density")
        else:
            ctr = center

        if source is not None:
            mylog.error(
                "Source argument is not currently supported for off-axis S-Z projections."
            )
            raise NotImplementedError

        beta_par = generate_beta_par(L)
        self.ds.add_field(("gas", "beta_par"),
                          function=beta_par,
                          units="g/cm**3")
        setup_sunyaev_zeldovich_fields(self.ds)

        dens = off_axis_projection(self.ds, ctr, L, w, nx, "density")
        Te = off_axis_projection(self.ds, ctr, L, w, nx, "t_sz") / dens
        bpar = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par") / dens
        omega1 = off_axis_projection(self.ds, ctr, L, w, nx,
                                     "t_squared") / dens
        omega1 = omega1 / (Te * Te) - 1.
        if self.high_order:
            bperp2 = off_axis_projection(self.ds, ctr, L, w, nx,
                                         "beta_perp_squared") / dens
            sigma1 = off_axis_projection(self.ds, ctr, L, w, nx,
                                         "t_beta_par") / dens
            sigma1 = sigma1 / Te - bpar
            kappa1 = off_axis_projection(self.ds, ctr, L, w, nx,
                                         "beta_par_squared") / dens
            kappa1 -= bpar
        else:
            bperp2 = np.zeros((nx, nx))
            sigma1 = np.zeros((nx, nx))
            kappa1 = np.zeros((nx, nx))
        tau = sigma_thompson * dens * self.mueinv / mh

        self.bounds = np.array([-0.5 * w, 0.5 * w, -0.5 * w, 0.5 * w])
        self.dx = w / nx
        self.dy = w / nx
        self.nx = nx

        self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar),
                                np.array(omega1), np.array(sigma1),
                                np.array(kappa1), np.array(bperp2))

        self.ds.field_info.pop(("gas", "beta_par"))
Example #7
    def __init__(
        self,
        filename,
        dataset_type="amrvac",
        units_override=None,
        unit_system="cgs",
        geometry_override=None,
        parfiles=None,
    ):
        """Instanciate AMRVACDataset.

        Parameters
        ----------
        filename : str
            Path to a datfile.

        dataset_type : str, optional
            This should always be 'amrvac'.

        units_override : dict, optional
            A dictionary of physical normalisation factors used to interpret on-disk data.

        unit_system : str, optional
            Either "cgs" (default), "mks" or "code"

        geometry_override : str, optional
            A geometry flag formatted according to either AMRVAC's or yt's standards.
            When this parameter is passed along with v5 or newer datfiles, it takes
            precedence over their internal "geometry" tag.

        parfiles : str or list, optional
            One or more parfiles to be passed to yt.frontends.amrvac.read_amrvac_parfiles()

        """
        # note: geometry_override and parfiles are specific to this frontend

        self._geometry_override = geometry_override
        super(AMRVACDataset, self).__init__(
            filename,
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
        )

        self._parfiles = parfiles

        namelist = None
        namelist_gamma = None
        c_adiab = None
        e_is_internal = None
        if parfiles is not None:
            namelist = read_amrvac_namelist(parfiles)
            if "hd_list" in namelist:
                c_adiab = namelist["hd_list"].get("hd_adiab", 1.0)
                namelist_gamma = namelist["hd_list"].get("hd_gamma")
            elif "mhd_list" in namelist:
                c_adiab = namelist["mhd_list"].get("mhd_adiab", 1.0)
                namelist_gamma = namelist["mhd_list"].get("mhd_gamma")

            if namelist_gamma is not None and self.gamma != namelist_gamma:
                mylog.error(
                    "Inconsistent values in gamma: datfile %s, parfiles %s",
                    self.gamma,
                    namelist_gamma,
                )

            if "method_list" in namelist:
                e_is_internal = namelist["method_list"].get(
                    "solve_internal_e", False)

        if c_adiab is not None:
            # this complicated unit is required for the adiabatic equation of state to make physical sense
            c_adiab *= (self.mass_unit**(1 - self.gamma) *
                        self.length_unit**(2 + 3 * (self.gamma - 1)) /
                        self.time_unit**2)

        self.namelist = namelist
        self._c_adiab = c_adiab
        self._e_is_internal = e_is_internal

        self.fluid_types += ("amrvac", )
        # refinement factor between a grid and its subgrid
        self.refine_by = 2
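A minimal usage sketch for this constructor (the file names are hypothetical), passing a parfile so that gamma and the adiabatic constant can be cross-checked as above.

import yt

# "kh_2d0010.dat" and "amrvac.par" are hypothetical file names.
ds = yt.load("kh_2d0010.dat", parfiles="amrvac.par", unit_system="code")
print(ds.gamma, ds.geometry)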
Example #8
    def _parse_parameter_file(self):
        """Parse input datfile's header. Apply geometry_override if specified."""
        # required method
        self.unique_identifier = int(
            os.stat(self.parameter_filename)[stat.ST_CTIME])

        # populate self.parameters with header data
        with open(self.parameter_filename, 'rb') as istream:
            self.parameters.update(get_header(istream))

        self.current_time = self.parameters['time']
        self.dimensionality = self.parameters['ndim']

        # force 3D for this definition
        dd = np.ones(3, dtype="int64")
        dd[:self.dimensionality] = self.parameters['domain_nx']
        self.domain_dimensions = dd

        # the following parameters may not be present in the datfile,
        # depending on the format version
        if self.parameters["datfile_version"] < 5:
            mylog.warning(
                "This data format does not contain geometry or periodicity info"
            )
        if self.parameters.get("staggered", False):
            mylog.warning(
                "'staggered' flag was found, but is currently ignored (unsupported)"
            )

        # parse geometry
        # by order of decreasing priority, we use
        # - geometry_override
        # - "geometry" parameter from datfile
        # - if all fails, default to "cartesian"
        geom_candidates = {"param": None, "override": None}
        amrvac_geom = self.parameters.get("geometry", None)
        if amrvac_geom is None:
            mylog.warning(
                "Could not find a 'geometry' parameter in source file.")
        else:
            geom_candidates.update(
                {"param": self._parse_geometry(amrvac_geom)})

        if self._geometry_override is not None:
            try:
                geom_candidates.update({
                    "override":
                    self._parse_geometry(self._geometry_override)
                })
            except ValueError:
                mylog.error(
                    "Unknown value for geometry_override (will be ignored).")

        if geom_candidates["override"] is not None:
            mylog.warning(
                "Using override geometry, this may lead to surprising results for inappropriate values."
            )
            self.geometry = geom_candidates["override"]
        elif geom_candidates["param"] is not None:
            mylog.info("Using parameter geometry")
            self.geometry = geom_candidates["param"]
        else:
            mylog.warning(
                "No geometry parameter supplied or found, defaulting to cartesian."
            )
            self.geometry = "cartesian"

        # parse periodicity
        per = self.parameters.get("periodic", np.array([False, False, False]))
        missing_dim = 3 - len(per)
        self.periodicity = np.append(per, [False] * missing_dim)

        self.gamma = self.parameters.get("gamma", 5.0 / 3.0)

        # parse domain edges
        dle = np.zeros(3)
        dre = np.ones(3)
        dle[:self.dimensionality] = self.parameters['xmin']
        dre[:self.dimensionality] = self.parameters['xmax']
        self.domain_left_edge = dle
        self.domain_right_edge = dre

        # defaulting to non-cosmological
        self.cosmological_simulation = 0
        self.current_redshift = 0.0
        self.omega_matter = 0.0
        self.omega_lambda = 0.0
        self.hubble_constant = 0.0
    def calculate_light_cone_solution(self, seed=None, filename=None):
        r"""Create list of projections to be added together to make the light cone.

        Parameters
        ----------
        seed : int
            The seed for the random number generator.  Any light cone solution
            can be reproduced by giving the same random seed.  Default: None
            (each solution will be distinct).
        filename : string
            If given, a text file detailing the solution will be written out.
            Default: None.

        """

        # Don"t use box coherence with maximum projection depths.
        if self.use_minimum_datasets and \
                self.minimum_coherent_box_fraction > 0:
            mylog.info("Setting minimum_coherent_box_fraction to 0 with " +
                       "minimal light cone.")
            self.minimum_coherent_box_fraction = 0

        # Calculate projection sizes, and get
        # random projection axes and centers.
        if seed is not None:
            seed = int(seed)
        np.random.seed(seed)

        # For box coherence, keep track of effective depth travelled.
        box_fraction_used = 0.0

        for q in range(len(self.light_cone_solution)):
            if "previous" in self.light_cone_solution[q]:
                del self.light_cone_solution[q]["previous"]
            if "next" in self.light_cone_solution[q]:
                del self.light_cone_solution[q]["next"]
            if q == len(self.light_cone_solution) - 1:
                z_next = self.near_redshift
            else:
                z_next = self.light_cone_solution[q+1]["redshift"]

            # Calculate fraction of box required for a depth of delta z
            self.light_cone_solution[q]["box_depth_fraction"] = \
                (self.cosmology.comoving_radial_distance(z_next, \
                        self.light_cone_solution[q]["redshift"]) / \
                        self.simulation.box_size).in_units("")

            # Calculate fraction of box required for width corresponding to
            # requested image size.
            proper_box_size = self.simulation.box_size / \
              (1.0 + self.light_cone_solution[q]["redshift"])
            self.light_cone_solution[q]["box_width_per_angle"] = \
              (self.cosmology.angular_scale(self.observer_redshift,
               self.light_cone_solution[q]["redshift"]) /
               proper_box_size).in_units("1 / degree")

            # Simple error check to make sure more than 100% of box depth
            # is never required.
            if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
                mylog.error(("Warning: box fraction required to go from " +
                             "z = %f to %f is %f") %
                            (self.light_cone_solution[q]["redshift"], z_next,
                             self.light_cone_solution[q]["box_depth_fraction"]))
                mylog.error(("Full box delta z is %f, but it is %f to the " +
                             "next data dump.") %
                            (self.light_cone_solution[q]["dz_max"],
                             self.light_cone_solution[q]["redshift"]-z_next))

            # Get projection axis and center.
            # If using box coherence, only get random axis and center if enough
            # of the box has been used, or if box_fraction_used will be greater
            # than 1 after this slice.
            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
              (box_fraction_used > self.minimum_coherent_box_fraction) or \
              (box_fraction_used +
               self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
                # Random axis and center.
                self.light_cone_solution[q]["projection_axis"] = \
                  np.random.randint(0, 3)
                self.light_cone_solution[q]["projection_center"] = \
                  np.random.random(3)
                box_fraction_used = 0.0
            else:
                # Same axis and center as previous slice,
                # but with depth center shifted.
                self.light_cone_solution[q]["projection_axis"] = \
                  self.light_cone_solution[q-1]["projection_axis"]
                self.light_cone_solution[q]["projection_center"] = \
                  self.light_cone_solution[q-1]["projection_center"].copy()
                self.light_cone_solution[q]["projection_center"]\
                  [self.light_cone_solution[q]["projection_axis"]] += \
                    0.5 * (self.light_cone_solution[q]["box_depth_fraction"] +
                           self.light_cone_solution[q-1]["box_depth_fraction"])
                if self.light_cone_solution[q]["projection_center"]\
                  [self.light_cone_solution[q]["projection_axis"]] >= 1.0:
                    self.light_cone_solution[q]["projection_center"]\
                      [self.light_cone_solution[q]["projection_axis"]] -= 1.0

            box_fraction_used += self.light_cone_solution[q]["box_depth_fraction"]

        # Write solution to a file.
        if filename is not None:
            self._save_light_cone_solution(filename=filename)
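A minimal sketch (with assumed redshifts and box size) of the box_depth_fraction calculation in the loop above, using yt's Cosmology calculator.

from yt.utilities.cosmology import Cosmology

co = Cosmology(hubble_constant=0.7, omega_matter=0.3, omega_lambda=0.7)
z, z_next = 0.5, 0.45    # assumed redshifts of adjacent light-cone slices
box_size_mpc = 180.0     # assumed comoving box size in Mpc
depth = co.comoving_radial_distance(z_next, z).in_units("Mpc")
print(float(depth) / box_size_mpc)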
Example #10
    def set_pdf_params(self, bin_type="lin", bin_number=1000, bin_range=None):
        r"""Set the parameters used to build the Probability Distribution Function
        for each ruler length for this function. The values output by the
        function are slotted into the bins described here.
        
        Parameters
        ----------
        bin_type : String
            Controls the edges of the bins spaced evenly in
            logarithmic or linear space, set by "log" or "lin", respectively.
            A single string, or list of strings for N-dim binning.
            Default = "lin".
        bin_number : Integer
            Sets how many bins to create, evenly spaced by the above
            parameter. A single integer, or a list of integers for N-dim
            binning. Default = 1000.
        bin_range : list of floats
            A pair of values giving the range for the bins.
            A pair of floats (a list), or a list of pairs for N-dim binning.
            Default = None.

        Examples
        --------
        >>> f1.set_pdf_params(bin_type='log', bin_range=[5e4, 5.5e13],
        ... bin_number=1000)
        """
        # This should be called after setSearchParams.
        if not hasattr(self.tpf, "lengths"):
            mylog.error(
                "Please call setSearchParams() before calling setPDFParams().")
            return None
        # Make sure they're either all lists or only one is.
        input = [bin_type, bin_number, bin_range]
        lists = 0
        for thing in input:
            if type(thing) == list:
                lists += 1
        if lists > 1 and lists < 3:
            mylog.error("Either all the inputs need to be lists, or only one.")
            return None
        # Make sure they're all the same length if they're lists.
        if lists == 3:
            first_len = 0
            for thing in input:
                if first_len == 0:
                    first_len = len(thing)
                    if first_len == 0:
                        mylog.error("Input cannot be an empty list.")
                        return None
                    continue
                if first_len != len(thing):
                    mylog.error("All the inputs need to have the same length.")
                    return None
        # If they are not all lists, put the input into lists for convenience.
        if lists == 1:
            bin_type, bin_number = [bin_type], [bin_number]
            bin_range = [bin_range]
        self.bin_type = bin_type
        self.bin_number = np.array(bin_number) - 1
        self.dims = range(len(bin_type))
        # Create the dict that stores the arrays to store the bin hits, and
        # the arrays themselves.
        self.length_bin_hits = {}
        for length in self.tpf.lengths:
            # It's easier to index flattened, but will be unflattened later.
            self.length_bin_hits[length] = np.zeros(self.bin_number,
                                                    dtype='int64').flatten()
        # Create the bin edges for each dimension.
        # self.bins is indexed by dimension
        self.bin_edges = {}
        for dim in self.dims:
            # Error check.
            if len(bin_range[dim]) != 2:
                raise ValueError("bin_range must have two values.")
            if bin_range[dim][1] <= bin_range[dim][0]:
                raise ValueError(
                    "bin_range[1] must be larger than bin_range[0]")
            # Make the edges for this dimension.
            if bin_type[dim] == "lin":
                self.bin_edges[dim] = np.linspace(bin_range[dim][0],
                                                  bin_range[dim][1],
                                                  bin_number[dim])
            elif bin_type[dim] == "log":
                self.bin_edges[dim] = np.logspace(
                    math.log10(bin_range[dim][0]),
                    math.log10(bin_range[dim][1]), bin_number[dim])
            else:
                raise ValueError('bin_type must be either "lin" or "log".')
Example #11
 def run_generator(self):
     r"""After all the functions have been added, run the generator.
     
     Examples
     --------
     >>> tpf.run_generator()
     """
     yt_counters("run_generator")
     # We need a function!
     if len(self._fsets) == 0:
         mylog.error("You need to add at least one function!")
         return None
     # Do all the startup tasks to get the grid points.
     if self.nlevels == 0:
         yt_counters("build_sort")
         self._build_sort_array()
         self.sort_done = False
         yt_counters("build_sort")
     else:
         yt_counters("init_kd_tree")
         self._init_kd_tree()
         self.sort_done = True
         yt_counters("init_kd_tree")
     # Store the fields.
     self.stored_fields = {}
     yt_counters("getting data")
     for field in self.fields:
         self.stored_fields[field] = self.ds[field].copy()
     self.ds.clear_data()
     # If the arrays haven't been sorted yet and need to be, do that.
     if not self.sort_done:
         for field in self.fields:
             self.stored_fields[field] = self.stored_fields[field][
                 self.sort]
         del self.sort
         self.sort_done = True
     yt_counters("getting data")
     self._build_fields_vals()
     yt_counters("big loop over lengths")
     t_waiting = 0.
     for bigloop, length in enumerate(self.lengths):
         self._build_points_array()
         if self.mine == 0:
             mylog.info("Doing length %1.5e" % length)
         # Things stop when this value below equals total_values.
         self.generated_points = 0
         self.gen_array = np.zeros(self.size, dtype='int64')
         self.comm_cycle_count = 0
         self.final_comm_cycle_count = 0
         self.sent_done = False
         self._setup_done_hooks_on_root()
         # While everyone else isn't done or I'm not done, we loop.
         while self._should_cycle():
             self._setup_recv_arrays()
             self._send_arrays()
             t0 = time.time()
             self.comm.mpi_Request_Waitall(self.send_hooks)
             self.comm.mpi_Request_Waitall(self.recv_hooks)
             t1 = time.time()
             t_waiting += (t1 - t0)
             if (self.recv_points < -1.).any() or (self.recv_points >
                                                   1.).any():  # or \
                 #(np.abs(np.log10(np.abs(self.recv_points))) > 20).any():
                 raise ValueError("self.recv_points is no good!")
             self.points = self.recv_points.copy()
             self.fields_vals = self.recv_fields_vals.copy()
             self.gen_array = self.recv_gen_array.copy()
             self._eval_points(length)
             self.gen_array[self.mine] = self.generated_points
             self.comm_cycle_count += 1
             if self.generated_points == self.total_values:
                 self._send_done_to_root()
         if self.mine == 0:
             mylog.info("Length (%d of %d) %1.5e took %d communication cycles to complete." % \
             (bigloop+1, len(self.lengths), length, self.comm_cycle_count))
     yt_counters("big loop over lengths")
     if self.nlevels >= 1:
         del fKD.pos, fKD.qv_many, fKD.nn_tags
         free_tree(0)  # Frees the kdtree object.
     yt_counters("allsum")
     self._allsum_bin_hits()
     mylog.info("Spent %f seconds waiting for communication." % t_waiting)
     yt_counters("allsum")
     yt_counters("run_generator")
Example #12
    def calculate_light_cone_solution(self, seed=None, filename=None):
        r"""Create list of projections to be added together to make the light cone.

        Parameters
        ----------
        seed : int
            The seed for the random number generator.  Any light cone solution
            can be reproduced by giving the same random seed.  Default: None
            (each solution will be distinct).
        filename : string
            If given, a text file detailing the solution will be written out.
            Default: None.

        """

        # Don"t use box coherence with maximum projection depths.
        if self.use_minimum_datasets and \
                self.minimum_coherent_box_fraction > 0:
            mylog.info("Setting minimum_coherent_box_fraction to 0 with " +
                       "minimal light cone.")
            self.minimum_coherent_box_fraction = 0

        # Calculate projection sizes, and get
        # random projection axes and centers.
        if seed is not None:
            seed = int(seed)
        np.random.seed(seed)

        # For box coherence, keep track of effective depth travelled.
        box_fraction_used = 0.0

        for q in range(len(self.light_cone_solution)):
            if "previous" in self.light_cone_solution[q]:
                del self.light_cone_solution[q]["previous"]
            if "next" in self.light_cone_solution[q]:
                del self.light_cone_solution[q]["next"]
            if q == len(self.light_cone_solution) - 1:
                z_next = self.near_redshift
            else:
                z_next = self.light_cone_solution[q + 1]["redshift"]

            # Calculate fraction of box required for a depth of delta z
            self.light_cone_solution[q]["box_depth_fraction"] = \
                (self.cosmology.comoving_radial_distance(z_next, \
                        self.light_cone_solution[q]["redshift"]) / \
                        self.simulation.box_size).in_units("")

            # Calculate fraction of box required for width corresponding to
            # requested image size.
            proper_box_size = self.simulation.box_size / \
              (1.0 + self.light_cone_solution[q]["redshift"])
            self.light_cone_solution[q]["box_width_per_angle"] = \
              (self.cosmology.angular_scale(self.observer_redshift,
               self.light_cone_solution[q]["redshift"]) /
               proper_box_size).in_units("1 / degree")

            # Simple error check to make sure more than 100% of box depth
            # is never required.
            if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
                mylog.error(
                    ("Warning: box fraction required to go from " +
                     "z = %f to %f is %f") %
                    (self.light_cone_solution[q]["redshift"], z_next,
                     self.light_cone_solution[q]["box_depth_fraction"]))
                mylog.error(("Full box delta z is %f, but it is %f to the " +
                             "next data dump.") %
                            (self.light_cone_solution[q]["dz_max"],
                             self.light_cone_solution[q]["redshift"] - z_next))

            # Get projection axis and center.
            # If using box coherence, only get random axis and center if enough
            # of the box has been used, or if box_fraction_used will be greater
            # than 1 after this slice.
            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
              (box_fraction_used > self.minimum_coherent_box_fraction) or \
              (box_fraction_used +
               self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
                # Random axis and center.
                self.light_cone_solution[q]["projection_axis"] = \
                  np.random.randint(0, 3)
                self.light_cone_solution[q]["projection_center"] = \
                  np.random.random(3)
                box_fraction_used = 0.0
            else:
                # Same axis and center as previous slice,
                # but with depth center shifted.
                self.light_cone_solution[q]["projection_axis"] = \
                  self.light_cone_solution[q-1]["projection_axis"]
                self.light_cone_solution[q]["projection_center"] = \
                  self.light_cone_solution[q-1]["projection_center"].copy()
                self.light_cone_solution[q]["projection_center"]\
                  [self.light_cone_solution[q]["projection_axis"]] += \
                    0.5 * (self.light_cone_solution[q]["box_depth_fraction"] +
                           self.light_cone_solution[q-1]["box_depth_fraction"])
                if self.light_cone_solution[q]["projection_center"]\
                  [self.light_cone_solution[q]["projection_axis"]] >= 1.0:
                    self.light_cone_solution[q]["projection_center"]\
                      [self.light_cone_solution[q]["projection_axis"]] -= 1.0

            box_fraction_used += self.light_cone_solution[q][
                "box_depth_fraction"]

        # Write solution to a file.
        if filename is not None:
            self._save_light_cone_solution(filename=filename)
    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
        r""" Make an off-axis projection of the SZ signal.

        Parameters
        ----------
        L : array_like
            The normal vector of the projection.
        center : A sequence of floats, a string, or a tuple.
            The coordinate of the center of the image. If set to 'c', 'center' or
            left blank, the plot is centered on the middle of the domain. If set to
            'max' or 'm', the center will be located at the maximum of the
            ('gas', 'density') field. Centering on the max or min of a specific
            field is supported by providing a tuple such as ("min","temperature") or
            ("max","dark_matter_density"). Units can be specified by passing in *center*
            as a tuple containing a coordinate and string unit name or by passing
            in a YTArray. If a list or unitless array is supplied, code units are
            assumed.
        width : float, tuple, or YTQuantity.
            The width of the projection. A float will assume the width is in code units.
            A (value, unit) tuple or YTQuantity allows for the units of the width to be specified.
        nx : integer, optional
            The dimensions on a side of the projection image.
        source : yt.data_objects.data_containers.YTSelectionContainer, optional
            If specified, this will be the data source used for selecting regions to project.
            Currently unsupported in yt 2.x.

        Examples
        --------
        >>> L = np.array([0.5, 1.0, 0.75])
        >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
        """
        if iterable(width):
            w = self.ds.quan(width[0], width[1]).in_units("code_length").value
        elif isinstance(width, YTQuantity):
            w = width.in_units("code_length").value
        else:
            w = width
        ctr, dctr = self.ds.coordinates.sanitize_center(center, L)

        if source is not None:
            mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
            raise NotImplementedError

        beta_par = generate_beta_par(L)
        self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
        setup_sunyaev_zeldovich_fields(self.ds)

        dens    = off_axis_projection(self.ds, ctr, L, w, nx, "density")
        Te      = off_axis_projection(self.ds, ctr, L, w, nx, "t_sz")/dens
        bpar    = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par")/dens
        omega1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_squared")/dens
        omega1  = omega1/(Te*Te) - 1.
        if self.high_order:
            bperp2  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_perp_squared")/dens
            sigma1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_beta_par")/dens
            sigma1  = sigma1/Te - bpar
            kappa1  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par_squared")/dens
            kappa1 -= bpar
        else:
            bperp2 = np.zeros((nx,nx))
            sigma1 = np.zeros((nx,nx))
            kappa1 = np.zeros((nx,nx))
        tau = sigma_thompson*dens*self.mueinv/mh

        self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
        self.dx = w/nx
        self.dy = w/nx
        self.nx = nx

        self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar),
                                np.array(omega1), np.array(sigma1),
                                np.array(kappa1), np.array(bperp2))

        self.ds.field_info.pop(("gas","beta_par"))
Example #14
    def _calculate_light_ray_solution(self, seed=None, 
                                      start_position=None, end_position=None,
                                      trajectory=None, filename=None):
        "Create list of datasets to be added together to make the light ray."

        # Calculate dataset sizes, and get random dataset axes and centers.
        np.random.seed(seed)

        # If using only one dataset, set start and stop manually.
        if start_position is not None:
            if len(self.light_ray_solution) > 1:
                raise RuntimeError("LightRay Error: cannot specify start_position " + \
                                   "if light ray uses more than one dataset.")
            if not ((end_position is None) ^ (trajectory is None)):
                raise RuntimeError("LightRay Error: must specify either end_position " + \
                                   "or trajectory, but not both.")
            self.light_ray_solution[0]['start'] = np.array(start_position)
            if end_position is not None:
                self.light_ray_solution[0]['end'] = np.array(end_position)
            else:
                # assume trajectory given as r, theta, phi
                if len(trajectory) != 3:
                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
                r, theta, phi = trajectory
                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
                  r * np.array([np.cos(phi) * np.sin(theta),
                                np.sin(phi) * np.sin(theta),
                                np.cos(theta)])
            self.light_ray_solution[0]['traversal_box_fraction'] = \
              vector_length(self.light_ray_solution[0]['start'], 
                            self.light_ray_solution[0]['end'])

        # the normal way (random start positions and trajectories for each dataset)
        else:
            
            # For box coherence, keep track of effective depth travelled.
            box_fraction_used = 0.0

            for q in range(len(self.light_ray_solution)):
                if (q == len(self.light_ray_solution) - 1):
                    z_next = self.near_redshift
                else:
                    z_next = self.light_ray_solution[q+1]['redshift']

                # Calculate fraction of box required for a depth of delta z
                self.light_ray_solution[q]['traversal_box_fraction'] = \
                    self.cosmology.comoving_radial_distance(z_next, \
                        self.light_ray_solution[q]['redshift']).in_units("Mpccm / h") / \
                        self.simulation.box_size

                # Simple error check to make sure more than 100% of box depth
                # is never required.
                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
                                (self.light_ray_solution[q]['redshift'], z_next,
                                 self.light_ray_solution[q]['traversal_box_fraction']))
                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
                                (self.light_ray_solution[q]['dz_max'],
                                 self.light_ray_solution[q]['redshift']-z_next))

                # Get dataset axis and center.
                # If using box coherence, only get start point and vector if
                # enough of the box has been used,
                # or if box_fraction_used will be greater than 1 after this slice.
                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
                        (box_fraction_used >
                         self.minimum_coherent_box_fraction) or \
                        (box_fraction_used +
                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                    # Random start point
                    self.light_ray_solution[q]['start'] = np.random.random(3)
                    theta = np.pi * np.random.random()
                    phi = 2 * np.pi * np.random.random()
                    box_fraction_used = 0.0
                else:
                    # Use end point of previous segment and same theta and phi.
                    self.light_ray_solution[q]['start'] = \
                      self.light_ray_solution[q-1]['end'][:]

                self.light_ray_solution[q]['end'] = \
                  self.light_ray_solution[q]['start'] + \
                    self.light_ray_solution[q]['traversal_box_fraction'] * \
                    np.array([np.cos(phi) * np.sin(theta),
                              np.sin(phi) * np.sin(theta),
                              np.cos(theta)])
                box_fraction_used += \
                  self.light_ray_solution[q]['traversal_box_fraction']

        if filename is not None:
            self._write_light_ray_solution(filename,
                extra_info={'parameter_filename':self.parameter_filename,
                            'random_seed':seed,
                            'far_redshift':self.far_redshift,
                            'near_redshift':self.near_redshift})
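A minimal sketch (with assumed values) of the trajectory-to-endpoint conversion used above: the end point is the start point displaced by r along the unit vector set by (theta, phi).

import numpy as np

start = np.array([0.1, 0.2, 0.3])          # assumed start position in box units
r, theta, phi = 0.4, np.pi / 3, np.pi / 4  # assumed trajectory (r, theta, phi)
end = start + r * np.array([np.cos(phi) * np.sin(theta),
                            np.sin(phi) * np.sin(theta),
                            np.cos(theta)])
print(end)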
    def create_cosmology_splice(self, near_redshift, far_redshift,
                                minimal=True, max_box_fraction=1.0,
                                deltaz_min=0.0,
                                time_data=True, redshift_data=True):
        r"""Create list of datasets capable of spanning a redshift
        interval.

        For cosmological simulations, the physical width of the simulation
        box corresponds to some \Delta z, which varies with redshift.
        Using this logic, one can stitch together a series of datasets to
        create a continuous volume or length element from one redshift to
        another. This method will return such a list.

        Parameters
        ----------
        near_redshift : float
            The nearest (lowest) redshift in the cosmology splice list.
        far_redshift : float
            The furthest (highest) redshift in the cosmology splice list.
        minimal : bool
            If True, the minimum number of datasets is used to connect the
            initial and final redshift.  If False, the list will contain as
            many entries as possible within the redshift
            interval.
            Default: True.
        max_box_fraction : float
            In terms of the size of the domain, the maximum length a light
            ray segment can be in order to span the redshift interval from
            one dataset to another.  If using a zoom-in simulation, this
            parameter can be set to the length of the high resolution
            region so as to limit ray segments to that size.  If the
            high resolution region is not cubical, the smallest side
            should be used.
            Default: 1.0 (the size of the box)
        deltaz_min : float
            Specifies the minimum delta z between consecutive datasets
            in the returned
            list.
            Default: 0.0.
        time_data : bool
            Whether or not to include time outputs when gathering
            datasets for time series.
            Default: True.
        redshift_data : bool
            Whether or not to include redshift outputs when gathering
            datasets for time series.
            Default: True.

        Examples
        --------

        >>> co = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
        >>> cosmo = co.create_cosmology_splice(1.0, 0.0)

        """

        if time_data and redshift_data:
            self.splice_outputs = self.simulation.all_outputs
        elif time_data:
            self.splice_outputs = self.simulation.all_time_outputs
        elif redshift_data:
            self.splice_outputs = self.simulation.all_redshift_outputs
        else:
            mylog.error('Both time_data and redshift_data are False.')
            return

        # Link datasets in list with pointers.
        # This is used for connecting datasets together.
        for i, output in enumerate(self.splice_outputs):
            if i == 0:
                output['previous'] = None
                output['next'] = self.splice_outputs[i + 1]
            elif i == len(self.splice_outputs) - 1:
                output['previous'] = self.splice_outputs[i - 1]
                output['next'] = None
            else:
                output['previous'] = self.splice_outputs[i - 1]
                output['next'] = self.splice_outputs[i + 1]

        # Calculate maximum delta z for each data dump.
        self.max_box_fraction = max_box_fraction
        self._calculate_deltaz_max()

        # Calculate minimum delta z for each data dump.
        self._calculate_deltaz_min(deltaz_min=deltaz_min)

        cosmology_splice = []
 
        if near_redshift == far_redshift:
            self.simulation.get_time_series(redshifts=[near_redshift])
            cosmology_splice.append(
                {'time': self.simulation[0].current_time,
                 'redshift': self.simulation[0].current_redshift,
                 'filename': os.path.join(self.simulation[0].fullpath,
                                          self.simulation[0].basename),
                 'next': None})
            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
                       (cosmology_splice[0]['filename'], near_redshift))
            return cosmology_splice
        
        # Use minimum number of datasets to go from z_i to z_f.
        if minimal:

            z_Tolerance = 1e-3
            z = far_redshift

            # Sort data outputs by proximity to current redshift.
            self.splice_outputs.sort(key=lambda obj:np.fabs(z - obj['redshift']))
            cosmology_splice.append(self.splice_outputs[0])
            z = cosmology_splice[-1]["redshift"]
            z_target = z - cosmology_splice[-1]["dz_max"]

            # fill redshift space with datasets
            while ((z_target > near_redshift) and
                   (np.abs(z_target - near_redshift) > z_Tolerance)):

                # Move forward from last slice in stack until z > z_max.
                current_slice = cosmology_splice[-1]

                while current_slice["next"] is not None:
                    current_slice = current_slice['next']
                    if current_slice["next"] is None:
                        break
                    if current_slice["next"]["redshift"] < z_target:
                        break

                if current_slice["redshift"] < z_target:
                    need_fraction = self.cosmology.comoving_radial_distance(
                        current_slice["redshift"], z) / \
                        self.simulation.box_size
                    raise RuntimeError(
                        ("Cannot create cosmology splice: " +
                         "Getting from z = %f to %f requires " +
                         "max_box_fraction = %f, but max_box_fraction "
                         "is set to %f") %
                         (z, current_slice["redshift"],
                          need_fraction, max_box_fraction))

                cosmology_splice.append(current_slice)
                z = current_slice["redshift"]
                z_target = z - current_slice["dz_max"]

        # Make light ray using maximum number of datasets (minimum spacing).
        else:
            # Sort data outputs by proximity to current redshift.
            self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift -
                                                           obj['redshift']))
            # For first data dump, choose closest to desired redshift.
            cosmology_splice.append(self.splice_outputs[0])

            nextOutput = cosmology_splice[-1]['next']
            while (nextOutput is not None):
                if (nextOutput['redshift'] <= near_redshift):
                    break
                if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift']) >
                    cosmology_splice[-1]['dz_min']):
                    cosmology_splice.append(nextOutput)
                nextOutput = nextOutput['next']
            if (cosmology_splice[-1]['redshift'] -
                cosmology_splice[-1]['dz_max']) > near_redshift:
                mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                near_redshift = cosmology_splice[-1]['redshift'] - \
                  cosmology_splice[-1]['dz_max']

        mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                   (len(cosmology_splice), far_redshift, near_redshift))
        
        # change the 'next' and 'previous' pointers to point to the correct outputs
        # for the created splice
        for i, output in enumerate(cosmology_splice):
            if len(cosmology_splice) == 1:
                output['previous'] = None
                output['next'] = None
            elif i == 0:
                output['previous'] = None
                output['next'] = cosmology_splice[i + 1]
            elif i == len(cosmology_splice) - 1:
                output['previous'] = cosmology_splice[i - 1]
                output['next'] = None
            else:
                output['previous'] = cosmology_splice[i - 1]
                output['next'] = cosmology_splice[i + 1]
        
        self.splice_outputs.sort(key=lambda obj: obj['time'])
        return cosmology_splice
    def _parse_index(self):
        f = open(self.index_filename, 'rb')
        grid = {}
        grid['read_field'] = None
        grid['read_type'] = None
        line = f.readline()
        while grid['read_field'] is None:
            parse_line(line, grid)
            if check_break(line): break
            line = f.readline()
        f.close()

        # It seems some datasets have a mismatch between ncells and
        # the actual grid dimensions.
        if np.prod(grid['dimensions']) != grid['ncells']:
            grid['dimensions'] -= 1
            grid['dimensions'][grid['dimensions'] == 0] = 1
        if np.prod(grid['dimensions']) != grid['ncells']:
            mylog.error(
                'product of dimensions %i not equal to number of cells %i' %
                (np.prod(grid['dimensions']), grid['ncells']))
            raise TypeError

        # Need to determine how many grids: self.num_grids
        dataset_dir = os.path.dirname(self.index_filename)
        dname = os.path.split(self.index_filename)[-1]
        if dataset_dir.endswith("id0"):
            dname = "id0/" + dname
            dataset_dir = dataset_dir[:-3]
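        # When the per-processor layout is used, dname now looks like
        # "id0/<problem>.NNNN.vtk": dname[4:-9] strips the "id0/" prefix and
        # the ".NNNN.vtk" suffix (leaving the problem name), while dname[-9:]
        # keeps the suffix; both are used below to glob the per-processor
        # files "id*/<problem>-id*.NNNN.vtk".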

        gridlistread = sglob(
            os.path.join(dataset_dir,
                         'id*/%s-id*%s' % (dname[4:-9], dname[-9:])))
        gridlistread.insert(0, self.index_filename)
        if 'id0' in dname:
            gridlistread += sglob(
                os.path.join(dataset_dir, 'id*/lev*/%s*-lev*%s' %
                             (dname[4:-9], dname[-9:])))
        else:
            gridlistread += sglob(
                os.path.join(dataset_dir,
                             'lev*/%s*-lev*%s' % (dname[:-9], dname[-9:])))
        ndots = dname.count(".")
        gridlistread = [
            fn for fn in gridlistread
            if os.path.basename(fn).count(".") == ndots
        ]
        self.num_grids = len(gridlistread)
        dxs = []
        levels = np.zeros(self.num_grids, dtype='int32')
        glis = np.empty((self.num_grids, 3), dtype='float64')
        gdds = np.empty((self.num_grids, 3), dtype='float64')
        gdims = np.ones_like(glis)
        j = 0
        self.grid_filenames = gridlistread
        while j < (self.num_grids):
            f = open(gridlistread[j], 'rb')
            gridread = {}
            gridread['read_field'] = None
            gridread['read_type'] = None
            line = f.readline()
            while gridread['read_field'] is None:
                parse_line(line, gridread)
                splitup = line.strip().split()
                if chk23('X_COORDINATES') in splitup:
                    gridread['left_edge'] = np.zeros(3)
                    gridread['dds'] = np.zeros(3)
                    v = np.fromfile(f, dtype='>f8', count=2)
                    gridread['left_edge'][0] = v[0] - 0.5 * (v[1] - v[0])
                    gridread['dds'][0] = v[1] - v[0]
                if chk23('Y_COORDINATES') in splitup:
                    v = np.fromfile(f, dtype='>f8', count=2)
                    gridread['left_edge'][1] = v[0] - 0.5 * (v[1] - v[0])
                    gridread['dds'][1] = v[1] - v[0]
                if chk23('Z_COORDINATES') in splitup:
                    v = np.fromfile(f, dtype='>f8', count=2)
                    gridread['left_edge'][2] = v[0] - 0.5 * (v[1] - v[0])
                    gridread['dds'][2] = v[1] - v[0]
                if check_break(line): break
                line = f.readline()
            f.close()
            levels[j] = gridread.get('level', 0)
            glis[j, 0] = gridread['left_edge'][0]
            glis[j, 1] = gridread['left_edge'][1]
            glis[j, 2] = gridread['left_edge'][2]
            # It seems some datasets have a mismatch between ncells and
            # the actual grid dimensions.
            if np.prod(gridread['dimensions']) != gridread['ncells']:
                gridread['dimensions'] -= 1
                gridread['dimensions'][gridread['dimensions'] == 0] = 1
            if np.prod(gridread['dimensions']) != gridread['ncells']:
                mylog.error(
                    'product of dimensions %i not equal to number of cells %i'
                    % (np.prod(gridread['dimensions']), gridread['ncells']))
                raise TypeError
            gdims[j, 0] = gridread['dimensions'][0]
            gdims[j, 1] = gridread['dimensions'][1]
            gdims[j, 2] = gridread['dimensions'][2]
            # Setting dds=1 for non-active dimensions in 1D/2D datasets
            gridread['dds'][gridread['dimensions'] == 1] = 1.
            gdds[j, :] = gridread['dds']

            j = j + 1

        gres = glis + gdims * gdds
        # Now we convert the glis, which were left edges (floats), to indices
        # from the domain left edge.  Then we do a bunch of fixing now that we
        # know the extent of all the grids.
        glis = np.round((glis - self.dataset.domain_left_edge.ndarray_view()) /
                        gdds).astype('int')
        new_dre = np.max(gres, axis=0)
        dre_units = self.dataset.domain_right_edge.uq
        self.dataset.domain_right_edge = np.round(new_dre,
                                                  decimals=12) * dre_units
        self.dataset.domain_width = \
                (self.dataset.domain_right_edge -
                 self.dataset.domain_left_edge)
        self.dataset.domain_center = \
                0.5*(self.dataset.domain_left_edge +
                     self.dataset.domain_right_edge)
        self.dataset.domain_dimensions = \
                np.round(self.dataset.domain_width/gdds[0]).astype('int')

        if self.dataset.dimensionality <= 2:
            self.dataset.domain_dimensions[2] = 1
        if self.dataset.dimensionality == 1:
            self.dataset.domain_dimensions[1] = 1

        dle = self.dataset.domain_left_edge
        dre = self.dataset.domain_right_edge
        dx_root = (
            self.dataset.domain_right_edge -
            self.dataset.domain_left_edge) / self.dataset.domain_dimensions
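        # When virtual parallel decomposition is requested (nprocs > 1), each
        # on-disk grid is split into nprocs pieces with decompose_array; the
        # edges, shapes, file offsets, and read dimensions of every piece are
        # collected so the IO layer can read the matching sub-blocks.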

        if self.dataset.nprocs > 1:
            gle_all = []
            gre_all = []
            shapes_all = []
            levels_all = []
            new_gridfilenames = []
            file_offsets = []
            read_dims = []
            for i in range(levels.shape[0]):
                dx = dx_root / self.dataset.refine_by**(levels[i])
                gle_orig = self.ds.arr(
                    np.round(dle + dx * glis[i], decimals=12), "code_length")
                gre_orig = self.ds.arr(
                    np.round(gle_orig + dx * gdims[i], decimals=12),
                    "code_length")
                bbox = np.array([[le, re]
                                 for le, re in zip(gle_orig, gre_orig)])
                psize = get_psize(self.ds.domain_dimensions, self.ds.nprocs)
                gle, gre, shapes, slices = decompose_array(
                    gdims[i], psize, bbox)
                gle_all += gle
                gre_all += gre
                shapes_all += shapes
                levels_all += [levels[i]] * self.dataset.nprocs
                new_gridfilenames += [self.grid_filenames[i]
                                      ] * self.dataset.nprocs
                file_offsets += [[slc[0].start, slc[1].start, slc[2].start]
                                 for slc in slices]
                read_dims += [
                    np.array([gdims[i][0], gdims[i][1], shape[2]], dtype="int")
                    for shape in shapes
                ]
            self.num_grids *= self.dataset.nprocs
            self.grids = np.empty(self.num_grids, dtype='object')
            self.grid_filenames = new_gridfilenames
            self.grid_left_edge = self.ds.arr(gle_all, "code_length")
            self.grid_right_edge = self.ds.arr(gre_all, "code_length")
            self.grid_dimensions = np.array([shape for shape in shapes_all],
                                            dtype="int32")
            gdds = (self.grid_right_edge -
                    self.grid_left_edge) / self.grid_dimensions
            glis = np.round((self.grid_left_edge - self.ds.domain_left_edge) /
                            gdds).astype('int')
            for i in range(self.num_grids):
                self.grids[i] = self.grid(i, self, levels_all[i], glis[i],
                                          shapes_all[i], file_offsets[i],
                                          read_dims[i])
        else:
            self.grids = np.empty(self.num_grids, dtype='object')
            for i in range(levels.shape[0]):
                self.grids[i] = self.grid(i, self, levels[i], glis[i],
                                          gdims[i], [0] * 3, gdims[i])
                dx = dx_root / self.dataset.refine_by**(levels[i])
                dxs.append(dx)

            dx = self.ds.arr(dxs, "code_length")
            self.grid_left_edge = self.ds.arr(
                np.round(dle + dx * glis, decimals=12), "code_length")
            self.grid_dimensions = gdims.astype("int32")
            self.grid_right_edge = self.ds.arr(
                np.round(self.grid_left_edge + dx * self.grid_dimensions,
                         decimals=12), "code_length")
        if self.dataset.dimensionality <= 2:
            self.grid_right_edge[:, 2] = dre[2]
        if self.dataset.dimensionality == 1:
            self.grid_right_edge[:, 1:] = dre[1:]
        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
    def sigmaM(self):
        """
         Written by BWO, 2006 (updated 25 January 2007).
         Converted to Python by Stephen Skory December 2009.

         This routine takes in cosmological parameters and creates a file (array) with
         sigma(M) in it, which is necessary for various press-schechter type
         stuff.  In principle one can calculate it ahead of time, but it's far,
         far faster in the long run to calculate your sigma(M) ahead of time.
        
         Inputs: cosmology, user must set parameters
        
         Outputs: four columns of data containing the following information:

         1) mass (Msolar/h)
         2) sigma (normalized) using Msun/h as the input
         
         The arrays output are used later.
        """
        
        # Set up the transfer function object.
        self.TF = TransferFunction(self.omega_matter0, self.omega_baryon0, 0.0, 0,
            self.omega_lambda0, self.hubble0, self.this_redshift);

        if self.TF.qwarn:
            mylog.error("You should probably fix your cosmology parameters!")

        # output arrays
        # 1) mass (Msun/h); the mass grid is defined in Msun and converted
        #    to Msun/h when stored
        self.masses_analytic = np.empty(self.num_sigma_bins, dtype='float64')
        # 2) sigma(M, z=0), where mass is in Msun/h
        self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')

        # get sigma_8 normalization
        R = 8.0;  # in units of Mpc/h (comoving)

        sigma8_unnorm = math.sqrt(self.sigma_squared_of_R(R));
        sigma_normalization = self.sigma8 / sigma8_unnorm;

        # rho0 in units of h^2 Msolar/Mpc^3
        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2 * self.hubble0**2,
                          'g/cm**3').in_units('Msun/Mpc**3')
        rho0 = rho0.value.item()       

        # spacing in mass of our sigma calculation
        dm = (float(self.log_mass_max) - self.log_mass_min)/self.num_sigma_bins;

        """
         loop over the total number of sigma_bins the user has requested. 
         For each bin, calculate mass and equivalent radius, and call
         sigma_squared_of_R to get the sigma(R) (equivalent to sigma(M)),
         normalize by user-specified sigma_8, and then write out.
        """
        for i in range(self.num_sigma_bins):
    
            # thislogmass is in units of Msolar, NOT Msolar/h
            thislogmass = self.log_mass_min +  i*dm
    
            # mass in units of h^-1 Msolar
            thismass = math.pow(10.0, thislogmass) * self.hubble0; 
    
            # radius is in units of h^-1 Mpc (comoving)
            thisradius = math.pow( 3.0*thismass / 4.0 / math.pi / rho0, 1.0/3.0 );
    
            R = thisradius; # h^-1 Mpc (comoving)
    
            self.masses_analytic[i] = thismass;  # Msun/h
    
            # get normalized sigma(R)
            self.sigmaarray[i] = math.sqrt(self.sigma_squared_of_R(R)) * sigma_normalization;
Beispiel #18
def load_hexahedral_mesh(
    data,
    connectivity,
    coordinates,
    length_unit=None,
    bbox=None,
    sim_time=0.0,
    mass_unit=None,
    time_unit=None,
    velocity_unit=None,
    magnetic_unit=None,
    periodicity=(True, True, True),
    geometry="cartesian",
    unit_system="cgs",
):
    r"""Load a hexahedral mesh of data into yt as a
    :class:`~yt.frontends.stream.data_structures.StreamHandler`.

    This should allow a semistructured grid of data to be loaded directly into
    yt and analyzed as would any others.  This comes with several caveats:

    * Units will be incorrect unless the data has already been converted to
      cgs.
    * Some functions may behave oddly, and parallelism will be
      disappointing or non-existent in most cases.
    * Particles may be difficult to integrate.

    Particle fields are detected as one-dimensional fields. The number of particles
    is set by the "number_of_particles" key in data.

    Parameters
    ----------
    data : dict
        This is a dict of numpy arrays, where the keys are the field names.
        There must only be one. Note that the data in the numpy arrays should
        define the cell-averaged value of the quantity in the hexahedral
        cell.
    connectivity : array_like
        This should be of size (N,8) where N is the number of zones.
    coordinates : array_like
        This should be of size (M,3) where M is the number of vertices
        indicated in the connectivity matrix.
    bbox : array_like (xdim:zdim, LE:RE), optional
        Size of computational domain in units of the length unit.
    sim_time : float, optional
        The simulation time in seconds
    length_unit : string
        Unit to use for lengths.  Defaults to unitless.
    mass_unit : string
        Unit to use for masses.  Defaults to unitless.
    time_unit : string
        Unit to use for times.  Defaults to unitless.
    velocity_unit : string
        Unit to use for velocities.  Defaults to unitless.
    magnetic_unit : string
        Unit to use for magnetic fields. Defaults to unitless.
    periodicity : tuple of booleans
        Determines whether the data will be treated as periodic along
        each axis
    geometry : string or tuple
        "cartesian", "cylindrical", "polar", "spherical", "geographic" or
        "spectral_cube".  Optionally, a tuple can be provided to specify the
        axis ordering -- for instance, to specify that the axis ordering should
        be z, x, y, this would be: ("cartesian", ("z", "x", "y")).  The same
        can be done for other coordinates, for instance:
        ("spherical", ("theta", "phi", "r")).

    """
    from yt.frontends.stream.data_structures import (
        StreamDictFieldHandler,
        StreamHandler,
        StreamHexahedralDataset,
    )
    from yt.frontends.stream.definitions import process_data, set_particle_types

    domain_dimensions = np.ones(3, "int32") * 2
    nprocs = 1
    if bbox is None:
        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64")
    domain_left_edge = np.array(bbox[:, 0], "float64")
    domain_right_edge = np.array(bbox[:, 1], "float64")
    grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1))

    field_units, data, _ = process_data(data)
    sfh = StreamDictFieldHandler()

    particle_types = set_particle_types(data)

    sfh.update({"connectivity": connectivity, "coordinates": coordinates, 0: data})
    # Simple check for axis length correctness
    if len(data) > 0:
        fn = list(sorted(data))[0]
        array_values = data[fn]
        if array_values.size != connectivity.shape[0]:
            mylog.error(
                "Dimensions of array must be one fewer than the coordinate set."
            )
            raise RuntimeError
    grid_left_edges = domain_left_edge
    grid_right_edges = domain_right_edge
    grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")

    if length_unit is None:
        length_unit = "code_length"
    if mass_unit is None:
        mass_unit = "code_mass"
    if time_unit is None:
        time_unit = "code_time"
    if velocity_unit is None:
        velocity_unit = "code_velocity"
    if magnetic_unit is None:
        magnetic_unit = "code_magnetic"

    # I'm not sure we need any of this.
    handler = StreamHandler(
        grid_left_edges,
        grid_right_edges,
        grid_dimensions,
        grid_levels,
        -np.ones(nprocs, dtype="int64"),
        np.zeros(nprocs, dtype="int64").reshape(nprocs, 1),  # Temporary
        np.zeros(nprocs).reshape((nprocs, 1)),
        sfh,
        field_units,
        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
        particle_types=particle_types,
        periodicity=periodicity,
    )

    handler.name = "HexahedralMeshData"
    handler.domain_left_edge = domain_left_edge
    handler.domain_right_edge = domain_right_edge
    handler.refine_by = 2
    handler.dimensionality = 3
    handler.domain_dimensions = domain_dimensions
    handler.simulation_time = sim_time
    handler.cosmology_simulation = 0

    sds = StreamHexahedralDataset(handler, geometry=geometry, unit_system=unit_system)

    return sds
    def __init__(self, omega_matter, omega_baryon, omega_hdm,
                 degen_hdm, omega_lambda, hubble, redshift):
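        # This appears to be a Python port of Eisenstein & Hu's
        # TFmdm_set_cosm() (the mixed cold + hot dark matter transfer
        # function fit of Eisenstein & Hu 1999, ApJ 511, 5).  It stores the
        # scalar quantities needed later by TFmdm_onek_mpc()/TFmdm_onek_hmpc().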
        self.qwarn = 0;
        self.theta_cmb = 2.728/2.7 # Assuming T_cmb = 2.728 K
    
        # Look for strange input
        if (omega_baryon<0.0):
            mylog.error("TFmdm_set_cosm(): Negative omega_baryon set to trace amount.\n")
            self.qwarn = 1
        if (omega_hdm<0.0):
            mylog.error("TFmdm_set_cosm(): Negative omega_hdm set to trace amount.\n")
            self.qwarn = 1;
        if (hubble<=0.0):
            mylog.error("TFmdm_set_cosm(): Negative Hubble constant illegal.\n")
            return None
        elif (hubble>2.0):
            mylog.error("TFmdm_set_cosm(): Hubble constant should be in units of 100 km/s/Mpc.\n");
            self.qwarn = 1;
        if (redshift<=-1.0):
            mylog.error("TFmdm_set_cosm(): Redshift < -1 is illegal.\n");
            return None
        elif (redshift>99.0):
            mylog.error("TFmdm_set_cosm(): Large redshift entered.  TF may be inaccurate.\n");
            self.qwarn = 1;

        if (degen_hdm<1): degen_hdm=1;
        self.num_degen_hdm = degen_hdm;	
        # Have to save this for TFmdm_onek_mpc()
        # This routine would crash if baryons or neutrinos were zero,
        # so don't allow that.
        if (omega_baryon<=0): omega_baryon=1e-5;
        if (omega_hdm<=0): omega_hdm=1e-5;
    
        self.omega_curv = 1.0-omega_matter-omega_lambda;
        self.omhh = omega_matter*SQR(hubble);
        self.obhh = omega_baryon*SQR(hubble);
        self.onhh = omega_hdm*SQR(hubble);
        self.f_baryon = omega_baryon/omega_matter;
        self.f_hdm = omega_hdm/omega_matter;
        self.f_cdm = 1.0-self.f_baryon-self.f_hdm;
        self.f_cb = self.f_cdm+self.f_baryon;
        self.f_bnu = self.f_baryon+self.f_hdm;
    
        # Compute the equality scale.
        self.z_equality = 25000.0*self.omhh/SQR(SQR(self.theta_cmb)) # Actually 1+z_eq
        self.k_equality = 0.0746*self.omhh/SQR(self.theta_cmb);
    
        # Compute the drag epoch and sound horizon
        z_drag_b1 = 0.313*math.pow(self.omhh,-0.419)*(1+0.607*math.pow(self.omhh,0.674));
        z_drag_b2 = 0.238*math.pow(self.omhh,0.223);
        self.z_drag = 1291*math.pow(self.omhh,0.251)/(1.0+0.659*math.pow(self.omhh,0.828))* \
            (1.0+z_drag_b1*math.pow(self.obhh,z_drag_b2));
        self.y_drag = self.z_equality/(1.0+self.z_drag);
    
        self.sound_horizon_fit = 44.5*math.log(9.83/self.omhh)/math.sqrt(1.0+10.0*math.pow(self.obhh,0.75));
    
        # Set up for the free-streaming & infall growth function 
        self.p_c = 0.25*(5.0-math.sqrt(1+24.0*self.f_cdm));
        self.p_cb = 0.25*(5.0-math.sqrt(1+24.0*self.f_cb));
    
        omega_denom = omega_lambda+SQR(1.0+redshift)*(self.omega_curv+\
                omega_matter*(1.0+redshift));
        self.omega_lambda_z = omega_lambda/omega_denom;
        self.omega_matter_z = omega_matter*SQR(1.0+redshift)*(1.0+redshift)/omega_denom;
        self.growth_k0 = self.z_equality/(1.0+redshift)*2.5*self.omega_matter_z/ \
            (math.pow(self.omega_matter_z,4.0/7.0)-self.omega_lambda_z+ \
            (1.0+self.omega_matter_z/2.0)*(1.0+self.omega_lambda_z/70.0));
        self.growth_to_z0 = self.z_equality*2.5*omega_matter/(math.pow(omega_matter,4.0/7.0) \
            -omega_lambda + (1.0+omega_matter/2.0)*(1.0+omega_lambda/70.0));
        self.growth_to_z0 = self.growth_k0/self.growth_to_z0;	
        
        # Compute small-scale suppression
        self.alpha_nu = self.f_cdm/self.f_cb*(5.0-2.*(self.p_c+self.p_cb))/(5.-4.*self.p_cb)* \
        math.pow(1+self.y_drag,self.p_cb-self.p_c)* \
        (1+self.f_bnu*(-0.553+0.126*self.f_bnu*self.f_bnu))/ \
        (1-0.193*math.sqrt(self.f_hdm*self.num_degen_hdm)+0.169*self.f_hdm*math.pow(self.num_degen_hdm,0.2))* \
        (1+(self.p_c-self.p_cb)/2*(1+1/(3.-4.*self.p_c)/(7.-4.*self.p_cb))/(1+self.y_drag));
        self.alpha_gamma = math.sqrt(self.alpha_nu);
        self.beta_c = 1/(1-0.949*self.f_bnu);
        # Done setting scalar variables
        self.hhubble = hubble # Need to pass Hubble constant to TFmdm_onek_hmpc()
Beispiel #20
    def _parse_parameter_file(self):
        """Parse input datfile's header. Apply geometry_override if specified."""
        # required method
        self.unique_identifier = int(
            os.stat(self.parameter_filename)[stat.ST_CTIME])

        # populate self.parameters with header data
        with open(self.parameter_filename, "rb") as istream:
            self.parameters.update(get_header(istream))

        self.current_time = self.parameters["time"]
        self.dimensionality = self.parameters["ndim"]

        # force 3D for this definition
        dd = np.ones(3, dtype="int64")
        dd[:self.dimensionality] = self.parameters["domain_nx"]
        self.domain_dimensions = dd

        if self.parameters.get("staggered", False):
            mylog.warning(
                "'staggered' flag was found, but is currently ignored (unsupported)"
            )

        # parse geometry
        # by order of decreasing priority, we use
        # - geometry_override
        # - "geometry" parameter from datfile
        # - if all fails, default to "cartesian"
        self.geometry = None
        amrvac_geom = self.parameters.get("geometry", None)
        if amrvac_geom is not None:
            self.geometry = self._parse_geometry(amrvac_geom)
        elif self.parameters["datfile_version"] > 4:
            # py38: walrus here
            mylog.error(
                "No 'geometry' flag found in datfile with version %d >4.",
                self.parameters["datfile_version"],
            )

        if self._geometry_override is not None:
            # py38: walrus here
            try:
                new_geometry = self._parse_geometry(self._geometry_override)
                if new_geometry == self.geometry:
                    mylog.info(
                        "geometry_override is identical to datfile parameter.")
                else:
                    self.geometry = new_geometry
                    mylog.warning(
                        "Overriding geometry, this may lead to surprising results."
                    )
            except ValueError:
                mylog.error(
                    "Unable to parse geometry_override '%s' (will be ignored).",
                    self._geometry_override,
                )

        if self.geometry is None:
            mylog.warning(
                "No geometry parameter supplied or found, defaulting to cartesian."
            )
            self.geometry = "cartesian"

        # parse periodicity
        per = self.parameters.get("periodic", np.array([False, False, False]))
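        # the datfile only stores periodicity for the active dimensions;
        # pad the remaining dimensions as non-periodic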
        missing_dim = 3 - len(per)
        self.periodicity = np.append(per, [False] * missing_dim)

        self.gamma = self.parameters.get("gamma", 5.0 / 3.0)

        # parse domain edges
        dle = np.zeros(3)
        dre = np.ones(3)
        dle[:self.dimensionality] = self.parameters["xmin"]
        dre[:self.dimensionality] = self.parameters["xmax"]
        self.domain_left_edge = dle
        self.domain_right_edge = dre

        # defaulting to non-cosmological
        self.cosmological_simulation = 0
        self.current_redshift = 0.0
        self.omega_matter = 0.0
        self.omega_lambda = 0.0
        self.hubble_constant = 0.0
Beispiel #21
    def __init__(self,
                 data,
                 fields=None,
                 units="cm",
                 center=None,
                 scale=None,
                 wcs=None):
        r""" Initialize a FITSImageBuffer object.

        FITSImageBuffer contains a list of FITS ImageHDU instances, and
        optionally includes WCS information. It inherits from HDUList, so
        operations such as `writeto` are enabled. Images can be constructed
        from ImageArrays, NumPy arrays, dicts of such arrays,
        FixedResolutionBuffers, and YTCoveringGrids. The latter two are the
        most powerful because WCS information can be constructed from their
        coordinates.

        Parameters
        ----------
        data : FixedResolutionBuffer, YTCoveringGrid, ImageArray,
            numpy.ndarray, or dict of such arrays
            The data to be made into a FITS image or images.
        fields : single string or list of strings, optional
            The field names for the data. If *fields* is None and *data* has
            keys, those will be used as the fields. If *data* is just a
            single array, one field name must be specified.
        units : string
            The units of the WCS coordinates, default "cm". 
        center : array_like, optional
            The coordinates [xctr,yctr] of the images in units
            *units*. If *center* is not specified, it defaults to the origin.
        scale : tuple of floats, optional
            Pixel scale in unit *units*. Will be ignored if *data* is
            a FixedResolutionBuffer or a YTCoveringGrid. Must be
            specified otherwise, or if *units* is "deg".
        wcs : `astropy.wcs.WCS` instance, optional
            Supply an AstroPy WCS instance to override automatic WCS creation.

        Examples
        --------

        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
        >>> prj = ds.proj(2, "kT", weight_field="density")
        >>> frb = prj.to_frb((0.5, "Mpc"), 800)
        >>> # This example just uses the FRB and puts the coords in kpc.
        >>> f_kpc = FITSImageBuffer(frb, fields="kT", units="kpc")
        >>> # This example specifies sky coordinates.
        >>> scale = [1./3600.]*2 # One arcsec per pixel
        >>> f_deg = FITSImageBuffer(frb, fields="kT", units="deg",
                                    scale=scale, center=(30., 45.))
        >>> f_deg.writeto("temp.fits")
        """

        super(FITSImageBuffer, self).__init__()

        if isinstance(fields, basestring): fields = [fields]

        exclude_fields = [
            'x', 'y', 'z', 'px', 'py', 'pz', 'pdx', 'pdy', 'pdz',
            'weight_field'
        ]

        if hasattr(data, 'keys'):
            img_data = data
        else:
            img_data = {}
            if fields is None:
                mylog.error("Please specify a field name for this array.")
                raise KeyError
            img_data[fields[0]] = data

        if fields is None: fields = img_data.keys()
        if len(fields) == 0:
            mylog.error("Please specify one or more fields to write.")
            raise KeyError

        first = True

        for key in fields:
            if key not in exclude_fields:
                mylog.info("Making a FITS image of field %s" % (key))
                if first:
                    hdu = pyfits.PrimaryHDU(np.array(img_data[key]))
                    first = False
                else:
                    hdu = pyfits.ImageHDU(np.array(img_data[key]))
                hdu.name = key
                hdu.header["btype"] = key
                if hasattr(img_data[key], "units"):
                    hdu.header["bunit"] = str(img_data[key].units)
                self.append(hdu)

        self.dimensionality = len(self[0].data.shape)

        if self.dimensionality == 2:
            self.nx, self.ny = self[0].data.shape
        elif self.dimensionality == 3:
            self.nx, self.ny, self.nz = self[0].data.shape

        has_coords = (isinstance(img_data, FixedResolutionBuffer)
                      or isinstance(img_data, YTCoveringGridBase))

        if center is None:
            if units == "deg":
                mylog.error("Please specify center=(RA, Dec) in degrees.")
                raise ValueError
            elif not has_coords:
                mylog.warning("Setting center to the origin.")
                center = [0.0] * self.dimensionality

        if scale is None:
            if units == "deg" or not has_coords and wcs is None:
                mylog.error("Please specify scale=(dx,dy[,dz]) in %s." %
                            (units))
                raise ValueError

        if wcs is None:
            w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
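            # CRPIX is the 1-based pixel coordinate of the WCS reference
            # point; (N + 1)/2 places it at the center of the image.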
            w.wcs.crpix = 0.5 * (np.array(self.shape) + 1)
            proj_type = ["linear"] * self.dimensionality
            if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
                # FRBs are a special case where we have coordinate
                # information, so we take advantage of this and
                # construct the WCS object
                dx = (img_data.bounds[1] -
                      img_data.bounds[0]).in_units(units) / self.nx
                dy = (img_data.bounds[3] -
                      img_data.bounds[2]).in_units(units) / self.ny
                xctr = 0.5 * (img_data.bounds[1] +
                              img_data.bounds[0]).in_units(units)
                yctr = 0.5 * (img_data.bounds[3] +
                              img_data.bounds[2]).in_units(units)
                center = [xctr, yctr]
            elif isinstance(img_data, YTCoveringGridBase):
                dx, dy, dz = img_data.dds.in_units(units)
                center = 0.5 * (img_data.left_edge +
                                img_data.right_edge).in_units(units)
            elif units == "deg" and self.dimensionality == 2:
                dx = -scale[0]
                dy = scale[1]
                proj_type = ["RA---TAN", "DEC--TAN"]
            else:
                dx = scale[0]
                dy = scale[1]
                if self.dimensionality == 3: dz = scale[2]

            w.wcs.crval = center
            w.wcs.cunit = [units] * self.dimensionality
            w.wcs.ctype = proj_type

            if self.dimensionality == 2:
                w.wcs.cdelt = [dx, dy]
            elif self.dimensionality == 3:
                w.wcs.cdelt = [dx, dy, dz]

            self._set_wcs(w)

        else:

            self._set_wcs(wcs)