Example #1
    def init_extinction(self):
        """Initialization attenuation laws"""

        from caesar.pyloser.atten_laws import calzetti, chevallard, conroy, cardelli, smc, lmc
        wave = self.ssp_wavelengths.astype(np.float64)
        self.ext_curves = []
        self.ext_curves.append(calzetti(wave))
        self.ext_curves.append(chevallard(wave))
        self.ext_curves.append(conroy(wave))
        self.ext_curves.append(cardelli(wave))
        self.ext_curves.append(smc(wave))
        self.ext_curves.append(lmc(wave))
        self.ext_curves = np.asarray(self.ext_curves)

        if 'calzetti' in self.ext_law: self.ext_law = 0
        elif 'chevallard' in self.ext_law: self.ext_law = 1
        elif 'conroy' in self.ext_law: self.ext_law = 2
        elif self.ext_law == 'mw' or self.ext_law == 'cardelli' or 'CCM' in self.ext_law:
            self.ext_law = 3
        elif 'smc' in self.ext_law:
            self.ext_law = 4
        elif 'lmc' in self.ext_law:
            self.ext_law = 5
        elif self.ext_law == 'mix_calz_MW':
            self.ext_law = 6
        elif self.ext_law == 'composite':
            self.ext_law = 7
        else:
            mylog.warning(
                'Extinction law %s not recognized, assuming composite' %
                self.ext_law)
            self.ext_law = 7
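The chained elif above maps an extinction-law name to an integer index into ext_curves. The same dispatch can be written as a lookup table; the following is a standalone sketch, not CAESAR's actual code, with the names copied from the snippet:

    # Substring -> index pairs mirroring the elif chain; order matters so
    # that 'mix_calz_MW' is matched before any shorter entry.
    _LAW_INDEX = [('calzetti', 0), ('chevallard', 1), ('conroy', 2),
                  ('mix_calz_MW', 6), ('composite', 7), ('mw', 3),
                  ('cardelli', 3), ('CCM', 3), ('smc', 4), ('lmc', 5)]

    def resolve_ext_law(name, default=7):
        for key, idx in _LAW_INDEX:
            if key in name:
                return idx
        return default  # unknown law: assume composite, as the snippet does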
Example #2
    def contamination_check(self,
                            lowres=[2, 3, 5],
                            search_factor=1.0,
                            printer=True):
        """Check for low resolution particle contamination.

        This method checks for low-resolution particles within 
        ``search_factor`` of the maximum halo radius.  When this
        method is called on a galaxy, it refers to the parent halo.

        Parameters
        ----------
        lowres : list, optional
            Particle types to be considered low-res.  Defaults to
            [2,3,5]; if your simulation contains black holes you will
            want to pass in [2,3]; if your simulation contains active
            dust particles, do not include 3.
        search_factor : float, optional
            Factor by which to expand the maximum halo radius search
            distance.  Default is 1.0.
        printer : boolean, optional
            Print results?

        Notes
        -----
        This method currently ONLY works on GADGET/GIZMO HDF5 files.

        """
        from yt.funcs import mylog
        from caesar.zoom_funcs import construct_lowres_tree

        construct_lowres_tree(self, lowres)

        if self.obj_type == 'halo':
            halo = self
            ID = 'Halo %d' % self.GroupID
        elif self.obj_type == 'galaxy':
            if self.halo is None:
                raise Exception('Galaxy %d has no halo!' % self.GroupID)
            halo = self.halo
            ID = "Galaxy %d's halo (ID %d)" % (self.GroupID, halo.GroupID)

        r = halo.virial_quantities['r200c'].d * search_factor

        result = self.obj._lowres['TREE'].query_ball_point(halo.pos.d, r)
        ncontam = len(result)
        lrmass = np.sum(self.obj._lowres['MASS'][result])

        self.contamination = lrmass / halo.virial_quantities['m200c'].d

        if not printer:
            return

        if ncontam > 0:
            mylog.warning('%s has %0.2f%% mass contamination ' \
                          '(%d LR particles with %0.2e %s)' %
                          (ID, self.contamination * 100.0, ncontam,
                           lrmass, halo.masses['total'].units))
        else:
            mylog.info('%s has NO contamination!' % ID)
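A hedged usage sketch for the method above (assumes an existing CAESAR catalog; the filename is hypothetical):

    import caesar

    obj = caesar.load('caesar_snapshot.hdf5')  # hypothetical catalog file
    halo = obj.halos[0]
    # Drop particle type 5 from lowres when the run includes black holes.
    halo.contamination_check(lowres=[2, 3], search_factor=1.0)
    print(halo.contamination)  # low-res mass fraction relative to m200c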
Example #3
 def setup_particle_aliases(self):
     known_particle_fields = dict(self.known_particle_fields)
     for field in sorted(self.field_list):
         if not isinstance(field, tuple):
             raise RuntimeError
         if field[0] in self.ds.fluid_types:
             continue
         args = known_particle_fields.get(
             field[1], ("", [], None))
         units, aliases, display_name = args
         # We allow field_units to override this.  First we check if the
         # field *name* is in there, then the field *tuple*.
         units = self.ds.field_units.get(field[1], units)
         units = self.ds.field_units.get(field, units)
         if not isinstance(units, str) and args[0] != "":
             units = "((%s)*%s)" % (args[0], units)
         if isinstance(units, (numeric_type, np.number, np.ndarray)) and \
             args[0] == "" and units != 1.0:
             mylog.warning("Cannot interpret units: %s * %s, " +
                           "setting to dimensionless.", units, args[0])
             units = ""
         elif units == 1.0:
             units = ""
         self.add_output_field(field, units = units,
                               display_name = display_name)
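Entries of known_particle_fields unpack as (units, aliases, display_name), and the .get fallback above means unknown on-disk fields become dimensionless output fields. A tiny standalone illustration of that lookup-with-default pattern (the table contents here are made up, not yt's real field definitions):

    # Illustrative table only: field name -> (units, aliases, display_name)
    known_particle_fields = {
        'Mass': ('code_mass', ['particle_mass'], None),
        'Velocities': ('code_velocity', ['particle_velocity'], None),
    }

    units, aliases, display_name = known_particle_fields.get(
        'Temperature', ('', [], None))  # unknown name -> default tuple
    print(repr(units), aliases, display_name)  # -> '' [] None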
Example #4
    def retrieve_ghost_zones(self, ngz, fields, smoothed=False):
        if smoothed:
            mylog.warning(
                f"{self}.retrieve_ghost_zones was called with the "
                f"`smoothed` argument set to True. This is not supported, "
                "ignoring it."
            )
            smoothed = False

        _subset_with_gz = getattr(self, "_subset_with_gz", {})

        try:
            new_subset = _subset_with_gz[ngz]
            mylog.debug(
                "Reusing previous subset with %s ghost zones for domain %s",
                ngz,
                self.domain_id,
            )
        except KeyError:
            new_subset = RAMSESDomainSubset(
                self.base_region,
                self.domain,
                self.ds,
                num_ghost_zones=ngz,
                base_grid=self,
            )
            _subset_with_gz[ngz] = new_subset

        # Cache the fields
        new_subset.get_data(fields)
        self._subset_with_gz = _subset_with_gz

        return new_subset
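The try/except around _subset_with_gz implements a small per-instance memo keyed by the ghost-zone count. The bare pattern, as a generic standalone sketch (not yt code):

    class GhostZoneCacheSketch:
        # Generic stand-in for the caching above.
        def retrieve(self, ngz):
            cache = getattr(self, '_cache', {})
            try:
                subset = cache[ngz]        # reuse an earlier construction
            except KeyError:
                subset = object()          # stand-in for the costly constructor
                cache[ngz] = subset
            self._cache = cache            # persist (first call creates it)
            return subset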
Example #5
 def __init__(self, filename, dataset_type='athena',
              storage_filename=None, parameters=None,
              units_override=None, nprocs=1, unit_system="cgs"):
     self.fluid_types += ("athena",)
     self.nprocs = nprocs
     if parameters is None:
         parameters = {}
     self.specified_parameters = parameters.copy()
     if units_override is None:
         units_override = {}
     # This is for backwards-compatibility
     already_warned = False
     for k, v in list(self.specified_parameters.items()):
         if k.endswith("_unit") and k not in units_override:
             if not already_warned:
                 mylog.warning("Supplying unit conversions from the parameters dict is deprecated, "+
                               "and will be removed in a future release. Use units_override instead.")
                 already_warned = True
             units_override[k] = self.specified_parameters.pop(k)
     Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                      unit_system=unit_system)
     self.filename = filename
     if storage_filename is None:
         storage_filename = '%s.yt' % filename.split('/')[-1]
     self.storage_filename = storage_filename
     self.backup_filename = self.filename[:-4] + "_backup.gdf"
     # Unfortunately we now have to mandate that the index gets
     # instantiated so that we can make sure we have the correct left
     # and right domain edges.
     self.index
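User code reaches this constructor through yt.load; passing units_override directly avoids the deprecated parameters-dict path warned about above. A hedged sketch (the filename is hypothetical; the keywords follow yt's documented interface):

    import yt

    ds = yt.load(
        'athena_output.0010.vtk',          # hypothetical Athena dump
        units_override={'length_unit': (1.0, 'kpc'),
                        'time_unit': (1.0, 'Myr'),
                        'mass_unit': (1.0e14, 'Msun')},
        nprocs=4,
    )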
Example #6
    def __init__(
        self,
        base_region,
        domain,
        ds,
        over_refine_factor=1,
        num_ghost_zones=0,
        base_grid=None,
    ):
        super(RAMSESDomainSubset, self).__init__(
            base_region, domain, ds, over_refine_factor, num_ghost_zones
        )

        self._base_grid = base_grid

        if num_ghost_zones > 0:
            if not all(ds.periodicity):
                mylog.warning(
                    "Ghost zones will wrongly assume the domain to be periodic."
                )
            # Create a base domain (with no ghost zones) that provides
            # self._base_domain.fwidth
            base_domain = RAMSESDomainSubset(
                ds.all_data(), domain, ds, over_refine_factor
            )
            self._base_domain = base_domain
        elif num_ghost_zones < 0:
            raise RuntimeError(
                "Cannot initialize a domain subset with a negative number "
                "of ghost zones, was called with num_ghost_zones=%s" % num_ghost_zones
            )
Example #7
    def _load_bh_data(self, select='all'):
        """If blackholes are present, loads BH_Mdot"""

        if select == 'all':
            flag = [True]*self.obj.simulation.nbh
        else:
            flag = (select>=0)

        if has_property(self.obj, 'bh', 'bhmass'):
            self.bhmass     = self.obj.yt_dataset.arr(get_property(self.obj, 'bhmass', 'bh').d[flag]*1e10, 'Msun/h').to(self.obj.units['mass'])  # I don't know how to convert this automatically
            self.use_bhmass = True
        else:
            mylog.warning('No black holes found')
            self.use_bhmass = False

        if has_property(self.obj, 'bh', 'bhmdot') and self.use_bhmass:
            # units multiplied by ((All.UnitMass_in_g / SOLAR_MASS) / (All.UnitTime_in_s / SEC_PER_YEAR))
            bhmdot_unit = '10.22465727143273*Msun/h/yr'
            #bhmdot_unit = '15.036260693283424*Msun/yr'
            #bhmdot_unit = '%s/%s' %(self.obj.units['mass'], self.obj.units['time'])

            bhmdot      = get_property(self.obj, 'bhmdot', 'bh').d[flag]  # of course it is dimensionless
            bhmdot      = self.obj.yt_dataset.arr(bhmdot, bhmdot_unit).to('%s/%s' %(self.obj.units['mass'], self.obj.units['time']))
            self.bhmdot = bhmdot
            #mylog.info('BH_Mdot available, units=%s'%bhmdot_unit)
        else: 
            if self.use_bhmass: 
                mylog.warning('Black holes are there, but BH_Mdot not available!')
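The conversion above relies on yt's unit registry attached to the dataset; the same .arr(...).to(...) round trip in isolation, using a synthetic dataset from yt.testing:

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    mdot = ds.arr([1.0, 2.0], 'g/s')   # raw values tagged with units
    print(mdot.to('Msun/yr'))          # converted exactly as in the snippet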
Example #8
 def _set_code_unit_attributes(self):
     """
      Generates the conversion to various physical units based on the parameter file
     """
     for unit, cgs in [
         ("length", "cm"),
         ("time", "s"),
         ("mass", "g"),
         ("velocity", "cm/s"),
         ("magnetic", "gauss"),
     ]:
         if unit == "magnetic":
             short_unit = "bfunit"
         else:
             short_unit = "%sunit" % unit[0]
         if short_unit in self.primary_header:
             # units should now be in header
             u = self.quan(
                 self.primary_header[short_unit],
                 self.primary_header.comments[short_unit].strip("[]"),
             )
             mylog.info("Found %s units of %s." % (unit, u))
         else:
             if unit == "length":
                 # Falling back to old way of getting units for length
                 # in old files
                 u = self.quan(1.0, str(self.wcs.wcs.cunit[0]))
                 mylog.info("Found %s units of %s." % (unit, u))
             else:
                 # Give up otherwise
                 u = self.quan(1.0, cgs)
                 mylog.warning(
                     "No unit for %s found. Assuming 1.0 code_%s = 1.0 %s" %
                     (unit, unit, cgs))
         setdefaultattr(self, "%s_unit" % unit, u)
Example #9
    def save(self, name, mpl_kwargs=None, canvas=None):
        """Choose backend and save image to disk"""
        from ._mpl_imports import \
            FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
        if mpl_kwargs is None:
            mpl_kwargs = {}
        if 'papertype' not in mpl_kwargs:
            mpl_kwargs['papertype'] = 'auto'

        suffix = get_image_suffix(name)
        if suffix == '':
            suffix = '.png'
            name = "%s%s" % (name, suffix)

        mylog.info("Saving plot %s", name)

        if suffix == ".png":
            canvas = FigureCanvasAgg(self.figure)
        elif suffix == ".pdf":
            canvas = FigureCanvasPdf(self.figure)
        elif suffix in (".eps", ".ps"):
            canvas = FigureCanvasPS(self.figure)
        else:
            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
            canvas = self.canvas

        canvas.print_figure(name, **mpl_kwargs)
        return name
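The suffix alone selects the matplotlib canvas; a short usage sketch with a synthetic dataset (yt plot objects expose an analogous save()):

    import yt
    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    p = yt.SlicePlot(ds, 'z', ('gas', 'density'))
    p.save('slice.pdf')   # '.pdf' -> FigureCanvasPdf
    p.save('slice')       # no suffix: '.png' is appended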
Example #10
    def _load_star_data(self, select='all'):
        """If star is present load Metallicity if present"""
        if self.obj.simulation.nstar == 0:
            return

        if select == 'all':
            flag = [True]*self.obj.simulation.nstar
        else:
            flag = (select>=0)

        if has_property(self.obj, 'star', 'metallicity'):
            self.sZ  = get_property(self.obj, 'metallicity', 'star')[flag]
        elif has_property(self.obj, 'star', 'met_tng'):  # try Illustris/TNG alias
            self.sZ  = get_property(self.obj, 'met_tng', 'star')[flag]  
            #self.sZ  = np.sum(self.sZ.T[2:],axis=0)  # first two are H,He; the rest sum to give metallicity
            #self.sZ[self.sZ<0] = 0.  # some (very small) negative values, set to 0
        else:
            mylog.warning('Metallicity not found: setting all stars to solar=0.0134')
            self.sZ = 0.0134*np.ones(self.obj.simulation.nstar,dtype=MY_DTYPE)

        ds = self.obj.yt_dataset
        if has_property(self.obj, 'star', 'aform'):
            self.age  = get_property(self.obj, 'aform', 'star')[flag]  # a_exp at time of formation
        elif has_property(self.obj, 'star', 'aform_tng'):  # try Illustris/TNG alias
            self.age  = get_property(self.obj, 'aform_tng', 'star')[flag]  
            self.age  = abs(self.age)  # some negative values here too; not sure what to do?
        else:
            self.age = np.zeros(self.obj.simulation.nstar,dtype=MY_DTYPE)
            mylog.warning('Stellar age not found -- photometry will be incorrect!')
        if ds.cosmological_simulation:
            from yt.utilities.cosmology import Cosmology
            co = Cosmology(hubble_constant=ds.hubble_constant, omega_matter=ds.omega_matter, omega_lambda=ds.omega_lambda)
            self.age = (ds.current_time - co.t_from_z(1./self.age-1.)).in_units('Gyr').astype(MY_DTYPE)  # age at time of snapshot 
Example #11
    def detect_fields(cls, ds):
        ndim = ds.dimensionality
        iout = int(str(ds).split("_")[1])
        basedir = os.path.split(ds.parameter_filename)[0]
        fname = os.path.join(basedir, cls.fname.format(iout=iout, icpu=1))
        with FortranFile(fname) as fd:
            cls.parameters = fd.read_attrs(cls.attrs)

        nvar = cls.parameters["nvar"]
        ndim = ds.dimensionality

        if nvar == ndim + 1:
            fields = ["potential"
                      ] + [f"{k}-acceleration" for k in "xyz"[:ndim]]
            ndetected = ndim
        else:
            fields = [f"{k}-acceleration" for k in "xyz"[:ndim]]
            ndetected = ndim

        if ndetected != nvar and not ds._warned_extra_fields["gravity"]:
            mylog.warning("Detected %s extra gravity fields.",
                          nvar - ndetected)
            ds._warned_extra_fields["gravity"] = True

            for i in range(nvar - ndetected):
                fields.append(f"var{i}")

        cls.field_list = [(cls.ftype, e) for e in fields]

        return fields
Example #12
 def setup_fluid_aliases(self):
     known_other_fields = dict(self.known_other_fields)
     for field in sorted(self.field_list):
         if not isinstance(field, tuple):
             raise RuntimeError
         if field[0] in self.ds.particle_types:
             continue
         args = known_other_fields.get(field[1], ("", [], None))
         units, aliases, display_name = args
         # We allow field_units to override this.  First we check if the
         # field *name* is in there, then the field *tuple*.
         units = self.ds.field_units.get(field[1], units)
         units = self.ds.field_units.get(field, units)
         if not isinstance(units, string_types) and args[0] != "":
             units = "((%s)*%s)" % (args[0], units)
         if isinstance(units, (numeric_type, np.number, np.ndarray)) and \
             args[0] == "" and units != 1.0:
             mylog.warning(
                 "Cannot interpret units: %s * %s, " +
                 "setting to dimensionless.", units, args[0])
             units = ""
         elif units == 1.0:
             units = ""
         self.add_output_field(field,
                               sampling_type="cell",
                               units=units,
                               display_name=display_name)
         for alias in aliases:
             self.alias(("gas", alias), field)
Example #13
    def __init__(
        self,
        filename,
        dataset_type="flash_hdf5",
        storage_filename=None,
        particle_filename=None,
        units_override=None,
        unit_system="cgs",
    ):

        self.fluid_types += ("flash", )
        if self._handle is not None:
            return
        self._handle = HDF5FileHandler(filename)

        self.particle_filename = particle_filename

        if self.particle_filename is None:
            # try to guess the particle filename
            try:
                self._particle_handle = HDF5FileHandler(
                    filename.replace("plt_cnt", "part"))
                self.particle_filename = filename.replace("plt_cnt", "part")
                mylog.info("Particle file found: %s",
                           self.particle_filename.split("/")[-1])
            except OSError:
                self._particle_handle = self._handle
        else:
            # particle_filename is specified by user
            self._particle_handle = HDF5FileHandler(self.particle_filename)

        # Check if the particle file has the same time
        if self._particle_handle != self._handle:
            part_time = self._particle_handle.handle.get("real scalars")[0][1]
            plot_time = self._handle.handle.get("real scalars")[0][1]
            if not np.isclose(part_time, plot_time):
                self._particle_handle = self._handle
                mylog.warning(
                    "%s and %s are not at the same time. "
                    "This particle file will not be used.",
                    self.particle_filename,
                    filename,
                )

        # These should be explicitly obtained from the file, but for now that
        # will wait until a reorganization of the source tree and better
        # generalization.
        self.refine_by = 2

        Dataset.__init__(
            self,
            filename,
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
        )
        self.storage_filename = storage_filename

        self.parameters["HydroMethod"] = "flash"  # always PPM DE
        self.parameters["Time"] = 1.0  # default unit is 1...
Example #14
    def _setup_parameters(self, ts):
        if self.workgroup.name != "readers": return None
        tds = ts[0]
        ptype = self.particle_type
        if ptype not in tds.particle_types and ptype != 'all':
            has_particle_filter = tds.add_particle_filter(ptype)
            if not has_particle_filter:
                raise RuntimeError("Particle type (filter) %s not found." % (ptype))

        dd = tds.all_data()
        # Get DM particle mass.

        particle_mass = self.particle_mass
        if particle_mass is None:
            pmass_min, pmass_max = dd.quantities.extrema(
                (ptype, "particle_mass"), non_zero = True)
            if np.abs(pmass_max - pmass_min) / pmass_max > 0.01:
                raise YTRockstarMultiMassNotSupported(pmass_min, pmass_max,
                    ptype)
            particle_mass = pmass_min

        p = {}
        if self.total_particles is None:
            # Get total_particles in parallel.
            tp = dd.quantities.total_quantity((ptype, "particle_ones"))
            p['total_particles'] = int(tp)
            mylog.warning("Total Particle Count: %0.3e", int(tp))
        p['left_edge'] = tds.domain_left_edge.in_units("Mpccm/h")
        p['right_edge'] = tds.domain_right_edge.in_units("Mpccm/h")
        p['center'] = (tds.domain_right_edge.in_units("Mpccm/h") + tds.domain_left_edge.in_units("Mpccm/h"))/2.0
        p['particle_mass'] = self.particle_mass = particle_mass
        p['particle_mass'].convert_to_units("Msun / h")
        del tds
        return p
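The derived quantities used above (extrema, total_quantity) work on any yt data object; a standalone sketch with synthetic particles:

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16, particles=100)
    dd = ds.all_data()
    pmin, pmax = dd.quantities.extrema(('all', 'particle_mass'))
    ntot = dd.quantities.total_quantity(('all', 'particle_ones'))
    print(pmin, pmax, int(ntot))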
Example #15
 def _count_particles(self, data_file):
     pcount = self._handle['x'].size
      if pcount > 1e9:
          mylog.warning("About to load %i particles into memory. "
                        "You may want to consider a midx-enabled load" % pcount)
     return {'dark_matter': pcount}
Example #16
 def __init__(self,
              start_point,
              end_point,
              ds=None,
              field_parameters=None,
              data_source=None):
     validate_3d_array(start_point)
     validate_3d_array(end_point)
     validate_object(ds, Dataset)
     validate_object(field_parameters, dict)
     validate_object(data_source, YTSelectionContainer)
     super(YTRay, self).__init__(ds, field_parameters, data_source)
     if isinstance(start_point, YTArray):
         self.start_point = self.ds.arr(start_point).to("code_length")
     else:
         self.start_point = self.ds.arr(start_point,
                                        "code_length",
                                        dtype="float64")
     if isinstance(end_point, YTArray):
         self.end_point = self.ds.arr(end_point).to("code_length")
     else:
         self.end_point = self.ds.arr(end_point,
                                      "code_length",
                                      dtype="float64")
     if (self.start_point < self.ds.domain_left_edge).any() or (
             self.end_point > self.ds.domain_right_edge).any():
         mylog.warning(
             "Ray start or end is outside the domain. "
             "Returned data will only be for the ray section inside the domain."
         )
     self.vec = self.end_point - self.start_point
     self._set_center(self.start_point)
     self.set_field_parameter("center", self.start_point)
     self._dts, self._ts = None, None
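Rays are normally built through the dataset factory method rather than by instantiating YTRay directly; a sketch with a synthetic dataset:

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    ray = ds.ray([0.1, 0.1, 0.1], [0.9, 0.9, 0.9])  # code_length endpoints
    print(ray[('gas', 'density')])   # field values along the segment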
Example #17
    def save(self, name, mpl_kwargs=None, canvas=None):
        """Choose backend and save image to disk"""
        from ._mpl_imports import \
            FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
        if mpl_kwargs is None:
            mpl_kwargs = {}
        if 'papertype' not in mpl_kwargs:
            mpl_kwargs['papertype'] = 'auto'

        suffix = get_image_suffix(name)
        if suffix == '':
            suffix = '.png'
            name = "%s%s" % (name, suffix)

        mylog.info("Saving plot %s", name)

        if suffix == ".png":
            canvas = FigureCanvasAgg(self.figure)
        elif suffix == ".pdf":
            canvas = FigureCanvasPdf(self.figure)
        elif suffix in (".eps", ".ps"):
            canvas = FigureCanvasPS(self.figure)
        else:
            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
            canvas = self.canvas

        with matplotlib_style_context():
            canvas.print_figure(name, **mpl_kwargs)
        return name
Example #18
 def _set_code_unit_attributes(self):
     arepo_unit_base = self._get_uvals()
     # This rather convoluted logic is required to ensure that
     # units which are present in the Arepo dataset will be used
     # no matter what but that the user gets warned
     if arepo_unit_base is not None:
         if self._unit_base is None:
             self._unit_base = arepo_unit_base
         else:
             for unit in arepo_unit_base:
                 if unit == "cmcm":
                     continue
                 short_unit = unit.split("_")[0][4:].lower()
                 if short_unit in self._unit_base:
                     which_unit = short_unit
                     self._unit_base.pop(short_unit, None)
                 elif unit in self._unit_base:
                     which_unit = unit
                 else:
                     which_unit = None
                 if which_unit is not None:
                     msg = f"Overwriting '{which_unit}' in unit_base with what we found in the dataset."
                     mylog.warning(msg)
                 self._unit_base[unit] = arepo_unit_base[unit]
             if "cmcm" in arepo_unit_base:
                 self._unit_base["cmcm"] = arepo_unit_base["cmcm"]
     super()._set_code_unit_attributes()
     munit = np.sqrt(self.mass_unit / (self.time_unit**2 * self.length_unit)).to(
         "gauss"
     )
     if self.cosmological_simulation:
         self.magnetic_unit = self.quan(munit.value, f"{munit.units}/a**2")
     else:
         self.magnetic_unit = munit
Example #19
    def detect_fields(cls, ds):
        ndim = ds.dimensionality
        iout = int(str(ds).split('_')[1])
        basedir = os.path.split(ds.parameter_filename)[0]
        fname = os.path.join(basedir, cls.fname.format(iout=iout, icpu=1))
        with FortranFile(fname) as fd:
            cls.parameters = fd.read_attrs(cls.attrs)

        nvar = cls.parameters['nvar']
        ndim = ds.dimensionality

        if nvar == ndim + 1:
            fields = ['potential'] + ['%s-acceleration' % k for k in 'xyz'[:ndim]]
            ndetected = ndim
        else:
            fields = ['%s-acceleration' % k for k in 'xyz'[:ndim]]
            ndetected = ndim

        if ndetected != nvar and not ds._warned_extra_fields['gravity']:
            mylog.warning('Detected %s extra gravity fields.',
                          nvar-ndetected)
            ds._warned_extra_fields['gravity'] = True

            for i in range(nvar-ndetected):
                fields.append('var%s' % i)

        cls.field_list = [(cls.ftype, e) for e in fields]

        return fields
Example #20
    def build_volume_bvh(self):
        """

        This constructs the mesh that will be ray-traced.

        """
        ftype, fname = self.field
        mesh_id = int(ftype[-1]) - 1
        index = self.data_source.ds.index
        offset = index.meshes[mesh_id]._index_offset
        field_data = self.data_source[self.field].d  # strip units

        vertices = index.meshes[mesh_id].connectivity_coords
        indices = index.meshes[mesh_id].connectivity_indices - offset

        # if this is an element field, promote to 2D here
        if len(field_data.shape) == 1:
            field_data = np.expand_dims(field_data, 1)

        # Here, we decide whether to render based on high-order or
        # low-order geometry.
        if indices.shape[1] == 27:
            # hexahedral
            mylog.warning(
                "27-node hexes not yet supported, dropping to 1st order.")
            field_data = field_data[:, 0:8]
            indices = indices[:, 0:8]

        self.volume = BVH(vertices, indices, field_data)
Example #21
    def parse_line(self, line, ascfile):
        """Parse a line of sdf"""

        if 'struct' in line:
            self.parse_struct(line, ascfile)
            return

        if "#" in line:
            self.comments.append(line)
            return

        spl = _lstrip(line.split("="))
        vtype, vname = _lstrip(spl[0].split())
        vname = vname.strip("[]")
        vval = spl[-1].strip(";")
        if vtype == 'parameter':
            self.parameters[vname] = vval
            return
        elif vtype == "char":
            vtype = "str"

        try:
            vval = eval("np." + vtype + "(%s)" % vval)
        except AttributeError:
            if vtype not in _types:
                mylog.warning("Skipping parameter %s", vname)
                return
            vval = eval("np." + _types[vtype] + "(%s)" % vval)

        self.parameters[vname] = vval
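Because eval executes arbitrary file-supplied text, a getattr-based lookup is the usual hardening; a standalone approximation of the conversion step above (not the SDF reader's actual code; the fallback table is illustrative):

    import numpy as np

    _types = {'int64_t': 'int64'}  # illustrative C-type -> numpy-name table

    def convert_value(vtype, vval):
        # Resolve the numpy scalar type by name instead of eval().
        np_type = getattr(np, vtype, None)
        if np_type is None and vtype in _types:
            np_type = getattr(np, _types[vtype], None)
        if np_type is None:
            return None  # caller logs a warning and skips the parameter
        return np_type(vval)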
Example #22
 def _determine_ptypes(self):
     """Determines what particle/field types to collect."""
     self.ptypes = ['gas', 'star']
     #if 'blackholes' in self.obj._kwargs and self.obj._kwargs['blackholes']:
     self.blackholes = self.dust = self.dm2 = False
     if hasattr(self.obj, '_ds_type'):
         if 'PartType5' in self.obj._ds_type.ds.particle_fields_by_type:
             if 'BH_Mdot' in self.obj._ds_type.ds.particle_fields_by_type[
                     'PartType5'] or 'StellarFormationTime' in self.obj._ds_type.ds.particle_fields_by_type[
                         'PartType5']:
                 self.ptypes.append('bh')
                 self.blackholes = True
         else:
             memlog('No black holes found')
     if hasattr(
             self.obj, '_kwargs'
     ) and 'dust' in self.obj._kwargs and self.obj._kwargs['dust']:
         mylog.warning('Enabling active dust particles')
         self.ptypes.append('dust')
         self.dust = True
     self.ptypes.append('dm')
     if hasattr(self.obj, '_kwargs'
                ) and 'dm2' in self.obj._kwargs and self.obj._kwargs['dm2']:
         self.ptypes.append('dm2')
         self.dm2 = True
Example #24
    def contamination_check(self,
                            lowres=[2, 3, 5],
                            search_factor=2.5,
                            printer=True):
        from caesar.zoom_funcs import construct_lowres_tree

        construct_lowres_tree(self, lowres)

        if self.obj_type == 'halo':
            halo = self
            ID = 'Halo {}'.format(self.GroupID)
        elif self.obj_type == 'galaxy':
            if self.halo is None:
                raise Exception('Galaxy {} has no halo!'.format(self.GroupID))
            halo = self.halo
            ID = 'Galaxy {}\'s halo (ID {})'.format(self.GroupID, halo.GroupID)

        r = halo.radii['virial'].d * search_factor

        result = self.obj._lowres['TREE'].query_ball_point(halo.pos.d, r)
        ncontam = len(result)
        lrmass = np.sum(self.obj._lowres['MASS'][result])

        self.contamination = lrmass / halo.masses['total'].d

        if not printer:
            return

        if ncontam > 0:
            mylog.warning('{} has {:.2f}% mass contamination '
                          '({} LR particles with {:.2e} {})'.format(
                              ID, self.contamination * 100.0, ncontam, lrmass,
                              halo.masses['total'].units))
        else:
            mylog.info('{} has NO contamination!'.format(ID))
Example #25
    def _set_code_unit_attributes(self):
        if self.parameters["Opt__Unit"]:
            # GAMER units are always in CGS
            setdefaultattr(self, "length_unit",
                           self.quan(self.parameters["Unit_L"], "cm"))
            setdefaultattr(self, "mass_unit",
                           self.quan(self.parameters["Unit_M"], "g"))
            setdefaultattr(self, "time_unit",
                           self.quan(self.parameters["Unit_T"], "s"))

            if self.mhd:
                setdefaultattr(self, "magnetic_unit",
                               self.quan(self.parameters["Unit_B"], "gauss"))

        else:
            if len(self.units_override) == 0:
                mylog.warning("Cannot determine code units ==> "
                              "Use units_override to specify the units")

            for unit, value, cgs in [
                ("length", 1.0, "cm"),
                ("time", 1.0, "s"),
                ("mass", 1.0, "g"),
                ("magnetic", np.sqrt(4.0 * np.pi), "gauss"),
            ]:
                setdefaultattr(self, f"{unit}_unit", self.quan(value, cgs))

                if len(self.units_override) == 0:
                    mylog.warning("Assuming %8s unit = %f %s", unit, value,
                                  cgs)
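When Opt__Unit is disabled the fallback above is a bare cgs guess, so the warning points at units_override; a hedged usage sketch (hypothetical filename):

    import yt

    ds = yt.load(
        'Data_000000',                      # hypothetical GAMER dump
        units_override={'length_unit': (1.0, 'Mpc'),
                        'time_unit': (1.0, 'Gyr'),
                        'mass_unit': (1.0e10, 'Msun')},
    )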
Example #26
    def _set_code_unit_attributes(self):
        if self.parameters['Opt__Unit']:
            # GAMER units are always in CGS
            setdefaultattr(self, 'length_unit',
                           self.quan(self.parameters['Unit_L'], 'cm'))
            setdefaultattr(self, 'mass_unit',
                           self.quan(self.parameters['Unit_M'], 'g'))
            setdefaultattr(self, 'time_unit',
                           self.quan(self.parameters['Unit_T'], 's'))

            if self.mhd:
                setdefaultattr(self, 'magnetic_unit',
                               self.quan(self.parameters['Unit_B'], 'gauss'))

        else:
            if len(self.units_override) == 0:
                mylog.warning("Cannot determine code units ==> " +
                              "Use units_override to specify the units")

            for unit, value, cgs in [("length", 1.0, "cm"), ("time", 1.0, "s"),
                                     ("mass", 1.0, "g"),
                                     ("magnetic", np.sqrt(4.0 * np.pi),
                                      "gauss")]:
                setdefaultattr(self, "%s_unit" % unit, self.quan(value, cgs))

                if len(self.units_override) == 0:
                    mylog.warning("Assuming %8s unit = %f %s", unit, value,
                                  cgs)
Example #27
    def _set_units(self, ds, base_units):
        if ds is not None:
            if getattr(ds, "cosmological_simulation", False):
                self.hubble_constant = ds.hubble_constant
                self.current_redshift = ds.current_redshift
        attrs = (
            "length_unit",
            "mass_unit",
            "time_unit",
            "velocity_unit",
            "magnetic_unit",
        )
        cgs_units = ("cm", "g", "s", "cm/s", "gauss")
        for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
            if unit is None:
                if ds is not None:
                    u = getattr(ds, attr, None)
                elif attr == "velocity_unit":
                    u = self.length_unit / self.time_unit
                elif attr == "magnetic_unit":
                    u = np.sqrt(4.0 * np.pi * self.mass_unit /
                                (self.time_unit**2 * self.length_unit))
                else:
                    u = cgs_unit
            else:
                u = unit

            if isinstance(u, str):
                uq = YTQuantity(1.0, u)
            elif isinstance(u, numeric_type):
                uq = YTQuantity(u, cgs_unit)
            elif isinstance(u, YTQuantity):
                uq = u.copy()
            elif isinstance(u, tuple):
                uq = YTQuantity(u[0], u[1])
            else:
                uq = None

            if uq is not None and hasattr(self, "hubble_constant"):
                # Don't store cosmology units
                atoms = {str(a) for a in uq.units.expr.atoms()}
                if str(uq.units).startswith(
                        "cm") or "h" in atoms or "a" in atoms:
                    uq.convert_to_cgs()

            if uq is not None and uq.units.is_code_unit:
                mylog.warning(
                    "Cannot use code units of '%s' "
                    "when creating a FITSImageData instance! "
                    "Converting to a cgs equivalent.",
                    uq.units,
                )
                uq.convert_to_cgs()

            if attr == "length_unit" and uq.value != 1.0:
                mylog.warning("Converting length units from %s to %s.", uq,
                              uq.units)
                uq = YTQuantity(1.0, uq.units)

            setattr(self, attr, uq)
Example #28
    def pixelize(self, dimension, data_source, field, bounds, size,
                 antialias = True, periodic = True):
        """
        Method for pixelizing datasets in preparation for
        two-dimensional image plots. Relies on several sampling
        routines written in cython
        """
        index = data_source.ds.index
        if (hasattr(index, 'meshes') and
           not isinstance(index.meshes[0], SemiStructuredMesh)):
            ftype, fname = field
            if ftype == "all":
                mesh_id = 0
                indices = np.concatenate([mesh.connectivity_indices for mesh in index.mesh_union])
            else:
                mesh_id = int(ftype[-1]) - 1
                indices = index.meshes[mesh_id].connectivity_indices

            coords = index.meshes[mesh_id].connectivity_coords
            offset = index.meshes[mesh_id]._index_offset
            ad = data_source.ds.all_data()
            field_data = ad[field]
            buff_size = size[0:dimension] + (1,) + size[dimension:]

            ax = data_source.axis
            xax = self.x_axis[ax]
            yax = self.y_axis[ax]
            c = np.float64(data_source.center[dimension].d)

            extents = np.zeros((3, 2))
            extents[ax] = np.array([c, c])
            extents[xax] = bounds[0:2]
            extents[yax] = bounds[2:4]

            # if this is an element field, promote to 2D here
            if len(field_data.shape) == 1:
                field_data = np.expand_dims(field_data, 1)
            # if this is a higher-order element, we demote to 1st order
            # here, for now.
            elif field_data.shape[1] == 27:
                # hexahedral
                mylog.warning("High order elements not yet supported, " +
                              "dropping to 1st order.")
                field_data = field_data[:, 0:8]
                indices = indices[:, 0:8]

            img = pixelize_element_mesh(coords,
                                        indices,
                                        buff_size, field_data, extents,
                                        index_offset=offset)

            # re-order the array and squeeze out the dummy dim
            return np.squeeze(np.transpose(img, (yax, xax, ax)))

        elif self.axis_id.get(dimension, dimension) < 3:
            return self._ortho_pixelize(data_source, field, bounds, size,
                                        antialias, dimension, periodic)
        else:
            return self._oblique_pixelize(data_source, field, bounds, size,
                                          antialias)
Example #29
    def __init__(self, filename=None):

        default_filename = False
        if filename is None:
            filename = _get_data_file()
            default_filename = True

        if not os.path.exists(filename):
            mylog.warning("File %s does not exist, will attempt to find it." % filename)
            filename = _get_data_file(data_file=filename)
        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
        in_file = h5py.File(filename, "r")
        if "info" in in_file.attrs:
            only_on_root(mylog.info, in_file.attrs["info"])
        if default_filename and \
          in_file.attrs["version"] < xray_data_version:
            raise ObsoleteDataException()
        else:
            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
                         in_file.attrs["version"])

        for field in ["emissivity_primordial", "emissivity_metals",
                      "log_nH", "log_T", "log_E"]:
            if field in in_file:
                setattr(self, field, in_file[field][:])
        in_file.close()

        E_diff = np.diff(self.log_E)
        self.E_bins = \
                  YTArray(np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
                                                      [self.log_E[-1] - 0.5 * E_diff[-1],
                                                       self.log_E[-1] + 0.5 * E_diff[-1]]])),
                          "keV")
        self.dnu = (np.diff(self.E_bins)/hcgs).in_units("Hz")
Example #30
    def pixelize_line(self, field, start_point, end_point, npoints):
        """
        Method for sampling datasets along a line in preparation for
        one-dimensional line plots. For UnstructuredMesh, relies on a
        sampling routine written in cython
        """
        if npoints < 2:
            raise ValueError(
                "Must have at least two sample points in order to draw a line plot."
            )
        index = self.ds.index
        if hasattr(index, "meshes") and not isinstance(index.meshes[0],
                                                       SemiStructuredMesh):
            ftype, fname = field
            if ftype == "all":
                mesh_id = 0
                indices = np.concatenate(
                    [mesh.connectivity_indices for mesh in index.mesh_union])
            else:
                mesh_id = int(ftype[-1]) - 1
                indices = index.meshes[mesh_id].connectivity_indices

            coords = index.meshes[mesh_id].connectivity_coords
            if coords.shape[1] != end_point.size != start_point.size:
                raise ValueError("The coordinate dimension doesn't match the "
                                 "start and end point dimensions.")

            offset = index.meshes[mesh_id]._index_offset
            ad = self.ds.all_data()
            field_data = ad[field]

            # if this is an element field, promote to 2D here
            if len(field_data.shape) == 1:
                field_data = np.expand_dims(field_data, 1)
            # if this is a higher-order element, we demote to 1st order
            # here, for now.
            elif field_data.shape[1] == 27:
                # hexahedral
                mylog.warning(
                    "High order elements not yet supported, dropping to 1st order."
                )
                field_data = field_data[:, 0:8]
                indices = indices[:, 0:8]

            arc_length, plot_values = pixelize_element_mesh_line(
                coords,
                indices,
                start_point,
                end_point,
                npoints,
                field_data,
                index_offset=offset,
            )
            arc_length = YTArray(arc_length, start_point.units)
            plot_values = YTArray(plot_values, field_data.units)
        else:
            ray = self.ds.ray(start_point, end_point)
            arc_length, plot_values = _sample_ray(ray, npoints, field)
        return arc_length, plot_values
Example #31
    def _parse_parameter_file(self):

        self._determine_structure()
        self._determine_axes()

        if self.parameter_filename.startswith("InMemory"):
            self.unique_identifier = time.time()
        else:
            self.unique_identifier = \
                int(os.stat(self.parameter_filename)[stat.ST_CTIME])

        # Determine dimensionality

        self.dimensionality = self.naxis
        self.geometry = "cartesian"

        # Sometimes a FITS file has a 4D datacube, in which case
        # we take the 4th axis and assume it consists of different fields.
        if self.dimensionality == 4:
            self.dimensionality = 3

        self._determine_wcs()

        self.domain_dimensions = np.array(self.dims)[:self.dimensionality]
        if self.dimensionality == 2:
            self.domain_dimensions = np.append(self.domain_dimensions,
                                               [int(1)])

        domain_left_edge = np.array([0.5] * 3)
        domain_right_edge = np.array(
            [float(dim) + 0.5 for dim in self.domain_dimensions])

        if self.dimensionality == 2:
            domain_left_edge[-1] = 0.5
            domain_right_edge[-1] = 1.5

        self.domain_left_edge = domain_left_edge
        self.domain_right_edge = domain_right_edge

        # Get the simulation time
        try:
            self.current_time = self.parameters["time"]
        except KeyError:
            mylog.warning("Cannot find time")
            self.current_time = 0.0

        # For now we'll ignore these
        self.periodicity = (False, ) * 3
        self.current_redshift = self.omega_lambda = self.omega_matter = \
            self.hubble_constant = self.cosmological_simulation = 0.0

        self._determine_nprocs()

        # Now we can set up some of our parameters for convenience.
        for k, v in self.primary_header.items():
            self.parameters[k] = v
        # Remove potential default keys
        self.parameters.pop('', None)
Example #32
 def _guess_name_from_units(self, units):
     field_from_unit = {"Jy": "intensity",
                        "K": "temperature"}
     for k,v in field_from_unit.items():
         if k in units:
             mylog.warning("Guessing this is a %s field based on its units of %s." % (v,k))
             return v
     return None
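A quick standalone check of the guess logic above:

    field_from_unit = {'Jy': 'intensity', 'K': 'temperature'}

    def guess_name(units):
        for k, v in field_from_unit.items():
            if k in units:
                return v
        return None

    print(guess_name('Jy/beam'))  # -> 'intensity'
    print(guess_name('erg/s'))    # -> None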
Example #33
 def _guess_name_from_units(self, units):
     # field_from_unit is a module-level mapping of unit substrings to
     # field names (see the previous example for its contents).
     for k, v in field_from_unit.items():
         if k in units:
             mylog.warning(
                 "Guessing this is a %s field based on its units of %s." %
                 (v, k))
             return v
     return None
Example #34
    def setup_fluid_aliases(self, ftype="gas"):
        known_other_fields = dict(self.known_other_fields)

        # For non-Cartesian geometry, convert alias of vector fields to
        # curvilinear coordinates
        aliases_gallery = self.get_aliases_gallery()

        for field in sorted(self.field_list):
            if not isinstance(field, tuple):
                raise RuntimeError
            if field[0] in self.ds.particle_types:
                continue
            args = known_other_fields.get(field[1], ("", [], None))
            units, aliases, display_name = args
            # We allow field_units to override this.  First we check if the
            # field *name* is in there, then the field *tuple*.
            units = self.ds.field_units.get(field[1], units)
            units = self.ds.field_units.get(field, units)
            if not isinstance(units, str) and args[0] != "":
                units = f"(({args[0]})*{units})"
            if (
                isinstance(units, (numeric_type, np.number, np.ndarray))
                and args[0] == ""
                and units != 1.0
            ):
                mylog.warning(
                    "Cannot interpret units: %s * %s, setting to dimensionless.",
                    units,
                    args[0],
                )
                units = ""
            elif units == 1.0:
                units = ""
            self.add_output_field(
                field, sampling_type="cell", units=units, display_name=display_name
            )
            axis_names = self.ds.coordinates.axis_order
            for alias in aliases:
                if (
                    self.curvilinear
                ):  # For non-Cartesian geometry, convert vector aliases

                    if alias[-2:] not in ["_x", "_y", "_z"]:
                        to_convert = False
                    else:
                        for suffix in ["x", "y", "z"]:
                            if f"{alias[:-2]}_{suffix}" not in aliases_gallery:
                                to_convert = False
                                break
                        else:
                            # Only convert when all three components exist.
                            to_convert = True
                    if to_convert:
                        if alias[-2:] == "_x":
                            alias = f"{alias[:-2]}_{axis_names[0]}"
                        elif alias[-2:] == "_y":
                            alias = f"{alias[:-2]}_{axis_names[1]}"
                        elif alias[-2:] == "_z":
                            alias = f"{alias[:-2]}_{axis_names[2]}"
                self.alias((ftype, alias), field)
Example #35
 def write_out(self, prefix='HMF', analytic=True, simulated=True):
     """
     Writes out the halo mass functions to file(s) with prefix *prefix*.
     """
     # First the analytic file, check that analytic fit exists and was requested
     if analytic:
         if self.make_analytic:
             fitname = prefix + '-analytic.dat'
             fp = open(fitname, "w")
             line = \
             "#Columns:\n" + \
             "#1. mass (M_solar)\n" + \
             "#2. cumulative number density of halos [comoving Mpc^-3]\n" + \
             "#3. (dn/dM)*dM (differential number density of halos) [comoving Mpc^-3]\n"
             fp.write(line)
             for i in range(self.masses_analytic.size - 1):
                 line = "%e\t%e\t%e\n" % (self.masses_analytic[i],
                 self.n_cumulative_analytic[i], 
                 self.dndM_dM_analytic[i])
                 fp.write(line)
             fp.close()
         # If the analytic halo mass function wasn't created, warn the user
         else:
             mylog.warning("The analytic halo mass function was not created and cannot be " +
                           "written out! Specify its creation with " +
                           "HaloMassFcn(make_analytic=True, other_args) when creating the " +
                           "HaloMassFcn object.")
      # Write out the simulated mass function if it exists and was requested
     if simulated:
         if self.make_simulated:
             haloname = prefix + '-simulated.dat'
             fp = open(haloname, "w")
             line = \
             "#Columns:\n" + \
             "#1. mass [Msun]\n" + \
             "#2. cumulative number density of halos [comoving Mpc^-3]\n"
             fp.write(line)
             for i in range(self.masses_sim.size - 1):
                 line = "%e\t%e\n" % (self.masses_sim[i], 
                 self.n_cumulative_sim[i])
                 fp.write(line)
             fp.close()
         # If the simulated halo mass function wasn't created, warn the user
         else:
             mylog.warning("The simulated halo mass function was not created and cannot " +
                           "be written out! Specify its creation by providing a loaded " +
                           "halo dataset with HaloMassFcn(ds_halos=loaded_halo_dataset, " +
                           "other_args) when creating the HaloMassFcn object.")
Example #36
 def update_all_headers(self, key, value):
     mylog.warning("update_all_headers is deprecated. "+
                   "Use update_header('all', key, value) instead.")
     self.update_header("all", key, value)
Example #37
    def __init__(self, data, fields=None, units=None, width=None, wcs=None):
        r""" Initialize a FITSImageData object.

        FITSImageData contains a collection of FITS ImageHDU instances and
        WCS information, along with units for each of the images. FITSImageData
        instances can be constructed from ImageArrays, NumPy arrays, dicts 
        of such arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter 
        two are the most powerful because WCS information can be constructed 
        automatically from their coordinates.

        Parameters
        ----------
        data : FixedResolutionBuffer, YTCoveringGrid, ImageArray,
            numpy.ndarray, or dict of such arrays
            The data to be made into a FITS image or images.
        fields : single string or list of strings, optional
            The field names for the data. If *fields* is None and *data* has
            keys, those will be used as the fields. If *data* is just a
            single array, one field name must be specified.
        units : string
            The units of the WCS coordinates. Defaults to "cm".
        width : float or YTQuantity
            The width of the image. Either a single value or iterable of values.
            If a float, assumed to be in *units*. Only used if this information 
            is not already provided by *data*.
        wcs : `astropy.wcs.WCS` instance, optional
            Supply an AstroPy WCS instance. Will override automatic WCS
            creation from FixedResolutionBuffers and YTCoveringGrids.

        Examples
        --------

        >>> # This example uses a FRB.
        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
        >>> prj = ds.proj(2, "kT", weight_field="density")
        >>> frb = prj.to_frb((0.5, "Mpc"), 800)
        >>> # This example just uses the FRB and puts the coords in kpc.
        >>> f_kpc = FITSImageData(frb, fields="kT", units="kpc")
        >>> # This example specifies a specific WCS.
        >>> from astropy.wcs import WCS
        >>> w = WCS(naxis=self.dimensionality)
        >>> w.wcs.crval = [30., 45.] # RA, Dec in degrees
        >>> w.wcs.cunit = ["deg"]*2
        >>> nx, ny = 800, 800
        >>> w.wcs.crpix = [0.5*(nx+1), 0.5*(ny+1)]
        >>> w.wcs.ctype = ["RA---TAN","DEC--TAN"]
        >>> scale = 1./3600. # One arcsec per pixel
        >>> w.wcs.cdelt = [-scale, scale]
        >>> f_deg = FITSImageData(frb, fields="kT", wcs=w)
        >>> f_deg.writeto("temp.fits")
        """

        if units is None:
            units = "cm"
        if width is None:
            width = 1.0

        exclude_fields = ['x','y','z','px','py','pz',
                          'pdx','pdy','pdz','weight_field']

        super(FITSImageData, self).__init__()

        if isinstance(fields, string_types):
            fields = [fields]

        if hasattr(data, 'keys'):
            img_data = data
            if fields is None:
                fields = list(img_data.keys())
        elif isinstance(data, np.ndarray):
            if fields is None:
                mylog.warning("No field name given for this array. Calling it 'image_data'.")
                fn = 'image_data'
                fields = [fn]
            else:
                fn = fields[0]
            img_data = {fn: data}

        self.fields = []
        for fd in fields:
            if isinstance(fd, tuple):
                self.fields.append(fd[1])
            else:
                self.fields.append(fd)

        first = True
        self.field_units = {}
        for key in fields:
            if key not in exclude_fields:
                if hasattr(img_data[key], "units"):
                    self.field_units[key] = img_data[key].units
                else:
                    self.field_units[key] = "dimensionless"
                mylog.info("Making a FITS image of field %s" % key)
                if first:
                    hdu = pyfits.PrimaryHDU(np.array(img_data[key]))
                    first = False
                else:
                    hdu = pyfits.ImageHDU(np.array(img_data[key]))
                hdu.name = key
                hdu.header["btype"] = key
                if hasattr(img_data[key], "units"):
                    hdu.header["bunit"] = re.sub('()', '', str(img_data[key].units))
                self.append(hdu)

        self.shape = self[0].shape
        self.dimensionality = len(self.shape)

        if wcs is None:
            w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
            if isinstance(img_data, FixedResolutionBuffer):
                # FRBs are a special case where we have coordinate
                # information, so we take advantage of this and
                # construct the WCS object
                dx = (img_data.bounds[1]-img_data.bounds[0]).in_units(units).v/self.shape[0]
                dy = (img_data.bounds[3]-img_data.bounds[2]).in_units(units).v/self.shape[1]
                xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).in_units(units).v
                yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).in_units(units).v
                center = [xctr, yctr]
                cdelt = [dx,dy]
            elif isinstance(img_data, YTCoveringGridBase):
                cdelt = img_data.dds.in_units(units).v
                center = 0.5*(img_data.left_edge+img_data.right_edge).in_units(units).v
            else:
                # If img_data is just an array, we assume the center is the origin
                # and use the image width to determine the cell widths
                if not iterable(width):
                    width = [width]*self.dimensionality
                if isinstance(width[0], YTQuantity):
                    cdelt = [wh.in_units(units).v/n for wh, n in zip(width, self.shape)]
                else:
                    cdelt = [float(wh)/n for wh, n in zip(width, self.shape)]
                center = [0.0]*self.dimensionality
            w.wcs.crpix = 0.5*(np.array(self.shape)+1)
            w.wcs.crval = center
            w.wcs.cdelt = cdelt
            w.wcs.ctype = ["linear"]*self.dimensionality
            w.wcs.cunit = [units]*self.dimensionality
            self.set_wcs(w)
        else:
            self.set_wcs(wcs)
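
A minimal usage sketch for the constructor tail above, assuming the class
subclasses astropy's HDUList (hence the super().__init__(), self.append, and
an inherited writeto) and that its signature is roughly
FITSImageData(data, fields=None, units="cm", width=None, wcs=None), inferred
from the parameters the body references; names and values are illustrative.

import numpy as np

# Bare-array branch: the array is wrapped as {'density': arr} and, with no
# wcs given, a linear WCS centered on the origin is built from width/units.
arr = np.random.random((128, 128))
fid = FITSImageData(arr, fields=["density"], units="kpc", width=300.0)
print(fid.fields)             # ['density']
print(fid.shape)              # (128, 128)
fid.writeto("density.fits")   # writeto assumed from the HDUList base class
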
    def _run(self, save_halos, save_catalog, njobs=-1, dynamic=False):
        r"""
        Run the requested halo analysis.

        Parameters
        ----------
        save_halos : bool
            If True, a list of all Halo objects is retained under the "halo_list"
            attribute.  If False, only the compiled quantities are saved under the
            "catalog" attribute.
        save_catalog : bool
            If True, save the final catalog to disk.
        njobs : int
            The number of jobs over which to divide halo analysis.  Choose -1
            to allocate one processor per halo.
            Default: -1
        dynamic : bool
            If False, halo analysis is divided evenly among all available processors.
            If True, parallelism is performed via a task queue.
            Default: False

        See Also
        --------
        create, load

        """
        self.catalog = []
        if save_halos: self.halo_list = []

        if self.halos_ds is None:
            # Find the halos and make a dataset of them
            self.halos_ds = self.finder_method(self.data_ds)
            if self.halos_ds is None:
                mylog.warning('No halos were found for {0}'.format(
                    self.data_ds.basename))
                if save_catalog:
                    self.halos_ds = self.data_ds
                    self.save_catalog()
                    self.halos_ds = None
                return
            # Instantiate the index on the new halos dataset.
            self.halos_ds.index

            # Assign ds and data sources appropriately
            self.data_source = self.halos_ds.all_data()

            # Add all of the default quantities that all halos must have
            self.add_default_quantities('all')

        # Iterate over halos in a deterministic order (sorted by particle
        # identifier), distributing the work according to njobs/dynamic.
        my_index = np.argsort(self.data_source["all", "particle_identifier"])
        for i in parallel_objects(my_index, njobs=njobs, dynamic=dynamic):
            new_halo = Halo(self)
            halo_filter = True
            for action_type, action in self.actions:
                if action_type == "callback":
                    action(new_halo)
                elif action_type == "filter":
                    halo_filter = action(new_halo)
                    if not halo_filter: break
                elif action_type == "quantity":
                    key, quantity = action
                    if quantity in self.halos_ds.field_info:
                        new_halo.quantities[key] = \
                          self.data_source[quantity][int(i)].in_cgs()
                    elif callable(quantity):
                        new_halo.quantities[key] = quantity(new_halo)
                else:
                    raise RuntimeError("Action must be a callback, filter, or quantity.")

            if halo_filter:
                self.catalog.append(new_halo.quantities)

            if save_halos and halo_filter:
                self.halo_list.append(new_halo)
            else:
                del new_halo

        # Sort the catalog by halo ID so the output ordering is reproducible.
        self.catalog.sort(key=lambda a: a['particle_identifier'].to_ndarray())
        if save_catalog:
            self.save_catalog()
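
A hedged driver sketch for _run: in yt's halo_analysis module this method is
normally reached through HaloCatalog.create(), so the import path, the
add_filter/add_quantity calls, and the finder_method value below are
assumptions drawn from that API rather than anything defined in the snippet.

import yt
from yt.analysis_modules.halo_analysis.api import HaloCatalog

ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006")  # illustrative dataset path
hc = HaloCatalog(data_ds=ds, finder_method="hop")
hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun")
hc.add_quantity("center_of_mass")
# create() forwards to _run(save_halos, save_catalog=True, njobs, dynamic).
hc.create(save_halos=False, njobs=-1)
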
def add_xray_emissivity_field(ds, e_min, e_max,
                              filename=None,
                              with_metals=True,
                              constant_metallicity=None):
    r"""Create X-ray emissivity fields for a given energy range.

    Parameters
    ----------
    e_min: float
        The minimum energy in keV for the energy band.
    e_max: float
        The maximum energy in keV for the energy band.
    filename: string, optional
        Path to data file containing emissivity values.  If None, a file
        called "cloudy_emissivity.h5" is used, appropriate for photoionized
        plasmas.  A second option, for collisionally ionized plasmas, is
        "apec_emissivity.h5"; both files are available at
        http://yt-project.org/data and contain emissivity tables for
        primordial elements and for metals at solar metallicity over the
        energy range 0.1 to 100 keV.
        Default: None.
    with_metals: bool, optional
        If True, use the metallicity field to add the contribution from 
        metals.  If False, only the emission from H/He is considered.
        Default: True.
    constant_metallicity: float, optional
        If specified, assume a constant metallicity for the emission 
        from metals.  The *with_metals* keyword must be set to False 
        to use this.
        Default: None.

    This will create three fields:

    "xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
    "xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
    "xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)

    Examples
    --------

    >>> from yt.mods import *
    >>> from yt.analysis_modules.spectral_integrator.api import *
    >>> ds = load(dataset)
    >>> add_xray_emissivity_field(ds, 0.5, 2)
    >>> p = ProjectionPlot(ds, 'x', "xray_emissivity_0.5_2_keV")
    >>> p.save()

    """

    if with_metals:
        try:
            ds._get_field_info("metal_density")
        except YTFieldNotFound:
            raise RuntimeError("Your dataset does not have a \"metal_density\" field! " +
                               "Perhaps you should specify a constant metallicity?")

    my_si = EmissivityIntegrator(filename=filename)

    em_0 = my_si.get_interpolator(my_si.emissivity_primordial, e_min, e_max)
    em_Z = None
    if with_metals or constant_metallicity is not None:
        em_Z = my_si.get_interpolator(my_si.emissivity_metals, e_min, e_max)

    energy_erg = np.power(10, my_si.log_E) * erg_per_keV
    emp_0 = my_si.get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
                                   e_min, e_max)
    emp_Z = None
    if with_metals or constant_metallicity is not None:
        emp_Z = my_si.get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
                                       e_min, e_max)

    try:
        ds._get_field_info("H_number_density")
    except YTFieldNotFound:
        mylog.warning("Could not find a field for \"H_number_density\". Assuming primordial H " +
                      "mass fraction.")
        def _nh(field, data):
            return primordial_H_mass_fraction*data["gas","density"]/mp
        ds.add_field(("gas", "H_number_density"), function=_nh, units="cm**-3")
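    # The interpolators return log10(emissivity) tabulated against
    # (log_nH, log_T); the field functions below exponentiate and multiply
    # by n_H^2, since the tabulated values carry units of erg (or photons)
    # * cm^3 / s.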

    def _emissivity_field(field, data):
        dd = {"log_nH" : np.log10(data["gas","H_number_density"]),
              "log_T"   : np.log10(data["gas","temperature"])}

        my_emissivity = np.power(10, em_0(dd))
        if em_Z is not None:
            if with_metals:
                my_Z = data["gas","metallicity"]
            elif constant_metallicity is not None:
                my_Z = constant_metallicity
            my_emissivity += my_Z * np.power(10, em_Z(dd))

        return data["gas","H_number_density"]**2 * \
            YTArray(my_emissivity, "erg*cm**3/s")

    emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
    ds.add_field(("gas", emiss_name), function=_emissivity_field,
                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
                 units="erg/cm**3/s")

    def _luminosity_field(field, data):
        return data[emiss_name] * data["cell_volume"]

    lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
    ds.add_field(("gas", lum_name), function=_luminosity_field,
                 display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
                 units="erg/s")

    def _photon_emissivity_field(field, data):
        dd = {"log_nH" : np.log10(data["gas","H_number_density"]),
              "log_T"   : np.log10(data["gas","temperature"])}

        my_emissivity = np.power(10, emp_0(dd))
        if emp_Z is not None:
            if with_metals:
                my_Z = data["gas","metallicity"]
            elif constant_metallicity is not None:
                my_Z = constant_metallicity
            my_emissivity += my_Z * np.power(10, emp_Z(dd))

        return data["gas","H_number_density"]**2 * \
            YTArray(my_emissivity, "photons*cm**3/s")

    phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
    ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
                 units="photons/cm**3/s")

    return emiss_name, lum_name, phot_name
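
A hedged usage sketch exercising the constant_metallicity branch with the
collisionally ionized (APEC) table named in the docstring; the dataset path
and the metallicity value are illustrative assumptions.

import yt

ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")  # illustrative path
names = add_xray_emissivity_field(ds, 0.5, 7.0,
                                  filename="apec_emissivity.h5",
                                  with_metals=False,
                                  constant_metallicity=0.3)  # solar units assumed
emiss_name, lum_name, phot_name = names
p = yt.ProjectionPlot(ds, "z", emiss_name)
p.save()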