    def save(self, name, mpl_kwargs=None, canvas=None):
        """Choose backend and save image to disk"""
        if mpl_kwargs is None:
            mpl_kwargs = {}
        if 'papertype' not in mpl_kwargs:
            mpl_kwargs['papertype'] = 'auto'

        suffix = get_image_suffix(name)
        if suffix == '':
            suffix = '.png'
            name = "%s%s" % (name, suffix)

        mylog.info("Saving plot %s", name)

        if suffix == ".png":
            canvas = FigureCanvasAgg(self.figure)
        elif suffix == ".pdf":
            canvas = FigureCanvasPdf(self.figure)
        elif suffix in (".eps", ".ps"):
            canvas = FigureCanvasPS(self.figure)
        else:
            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
            canvas = self.canvas

        canvas.print_figure(name, **mpl_kwargs)
        return name
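
# Usage sketch (illustrative, not from the original source): `plot` stands in
# for any object exposing the save() method above, e.g. a yt plot container
# holding a Matplotlib figure. The filename suffix selects the backend.
# >>> plot.save("projection.pdf", mpl_kwargs={"dpi": 300})
# >>> plot.save("projection")  # no suffix: ".png" is appended, Agg is used
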
    def build_transfer_function(self):
        """
        Builds the transfer function according to the current state of the
        TransferFunctionHelper.

        Parameters
        ----------
        None

        Returns
        -------
        A ColorTransferFunction object.

        """
        if self.bounds is None:
            mylog.info("Calculating data bounds. This may take a while." + "  Set the .bounds to avoid this.")
            self.set_bounds()

        if self.log:
            mi, ma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
        else:
            mi, ma = self.bounds
        self.tf = ColorTransferFunction((mi, ma), grey_opacity=self.grey_opacity, nbins=512)
        return self.tf
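
# Usage sketch (illustrative; assumes yt's TransferFunctionHelper, whose
# set_field/set_log/set_bounds methods prepare the state used above):
# >>> import yt
# >>> from yt.visualization.volume_rendering.transfer_function_helper import \
# ...     TransferFunctionHelper
# >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
# >>> tfh = TransferFunctionHelper(ds)
# >>> tfh.set_field("density")
# >>> tfh.set_log(True)
# >>> tf = tfh.build_transfer_function()  # a ColorTransferFunction
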
Example #3
    def build_memmap(self):
        assert self.size != -1
        mylog.info('Building memmap with offset: %i and size %i' %
                   (self._offset, self.size))
        self.handle = HTTPArray(self.filename, dtype=self.dtype,
                                shape=self.size, offset=self._offset)
        for k in self.dtype.names:
            self.data[k] = RedirectArray(self.handle, k)
Example #4
def setup_counts_fields(ds, ebounds, ftype="gas"):
    r"""
    Create deposited image fields from X-ray count data in energy bands.

    Parameters
    ----------
    ds : Dataset
        The FITS events file dataset to add the counts fields to.
    ebounds : list of tuples
        A list of tuples, one for each field, with (emin, emax) as the
        energy bounds for the image.
    ftype : string, optional
        The field type of the resulting field. Defaults to "gas".

    Examples
    --------
    >>> ds = yt.load("evt.fits")
    >>> ebounds = [(0.1,2.0),(2.0,3.0)]
    >>> setup_counts_fields(ds, ebounds)
    """
    for (emin, emax) in ebounds:
        cfunc = _make_counts(emin, emax)
        fname = "counts_%s-%s" % (emin, emax)
        mylog.info("Creating counts field %s." % fname)
        ds.add_field((ftype, fname), function=cfunc,
                     units="counts/pixel",
                     validators=[ValidateSpatial()],
                     display_name="Counts (%s-%s keV)" % (emin, emax))
def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
    """
    Output the parameters of the series of lines used to fit an
    absorption spectrum.

    The dataset contains entries in the form species/N, species/b
    species/z, and species/complex. The ith entry in each of the datasets
    is the fitted parameter for the ith line fitted to the spectrum for
    the given species. The species names come from the fitted line
    dictionary.

    Parameters
    ----------
    lineDic : dictionary
        Dictionary of dictionaries representing the fit lines. 
        Top level keys are the species given in orderFits and the corresponding
        entries are dictionaries with the keys 'N','b','z', and 'group#'. 
        Each of these corresponds to a list of the parameters for every
        accepted fitted line. 
    file_name : string, optional
        Name of the file to output the fit to. Default: 'spectrum_fit.h5'

    """
    f = h5py.File(file_name, 'w')
    for ion, params in lineDic.items():
        f.create_dataset("{0}/N".format(ion),data=params['N'])
        f.create_dataset("{0}/b".format(ion),data=params['b'])
        f.create_dataset("{0}/z".format(ion),data=params['z'])
        f.create_dataset("{0}/complex".format(ion),data=params['group#'])
    mylog.info('Writing spectrum fit to {0}'.format(file_name))
    f.close()
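
# Usage sketch (illustrative): writing a fit with one accepted line for a
# single species. The dictionary layout follows the docstring above; the
# numbers are made up.
# >>> fitted_lines = {
# ...     "HI": {"N": [1.0e13], "b": [20.0], "z": [0.05], "group#": [0]}}
# >>> _output_fit(fitted_lines, file_name="spectrum_fit.h5")
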
Example #6
def sanitize_fits_unit(unit):
    if unit == "Mpc":
        mylog.info("Changing FITS file unit to kpc.")
        unit = "kpc"
    elif unit == "au":
        unit = "AU"
    return unit
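
# Usage sketch (illustrative): the two rewrites performed above.
# >>> sanitize_fits_unit("Mpc")  # logs a note and returns "kpc"
# 'kpc'
# >>> sanitize_fits_unit("au")
# 'AU'
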
    def find_children(self, min_val, max_val=None):
        if self.children is not None:
            mylog.info("Wiping out existing children clumps: %d.",
                       len(self.children))
        self.children = []
        if max_val is None:
            max_val = self.max_val
        nj, cids = identify_contours(self.data, self.field, min_val, max_val)
        # Here, cids is the set of slices and values, keyed by the
        # parent_grid_id, that defines the contours.  So we can figure out all
        # the unique values of the contours by examining the list here.
        unique_contours = set([])
        for sl_list in cids.values():
            for sl, ff in sl_list:
                unique_contours.update(np.unique(ff))
        contour_key = uuid.uuid4().hex
        base_object = getattr(self.data, 'base_object', self.data)
        add_contour_field(base_object.ds, contour_key)
        for cid in sorted(unique_contours):
            if cid == -1:
                continue
            new_clump = base_object.cut_region(
                ["obj['contours_%s'] == %s" % (contour_key, cid)],
                {('contour_slices_%s' % contour_key): cids})
            if new_clump["ones"].size == 0:
                # Skip possibly duplicate clumps; "ones" is a cheap field
                # to check.
                continue
            self.children.append(Clump(new_clump, self.field, parent=self,
                                       clump_info=self.clump_info,
                                       validators=self.validators))
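
# Usage sketch (illustrative; assumes yt's Clump class, whose import path
# varies between yt versions, and a made-up cgs density threshold):
# >>> import yt
# >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
# >>> sphere = ds.sphere("max", (0.1, "Mpc"))
# >>> master = Clump(sphere, ("gas", "density"))
# >>> master.find_children(min_val=1e-27)
# >>> print(len(master.children))
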
    def save_catalog(self):
        "Write out hdf5 file with all halo quantities."

        filename = os.path.join(self.output_dir, "%s.%d.h5" %
                                (self.output_prefix, self.comm.rank))
        n_halos = len(self.catalog)
        mylog.info("Saving halo catalog (%d halos) to %s." %
                   (n_halos, os.path.join(self.output_dir,
                                         self.output_prefix)))
        out_file = h5py.File(filename, 'w')
        for attr in ["current_redshift", "current_time",
                     "domain_dimensions",
                     "cosmological_simulation", "omega_lambda",
                     "omega_matter", "hubble_constant"]:
            out_file.attrs[attr] = getattr(self.halos_ds, attr)
        for attr in ["domain_left_edge", "domain_right_edge"]:
            out_file.attrs[attr] = getattr(self.halos_ds, attr).in_cgs()
        out_file.attrs["data_type"] = "halo_catalog"
        out_file.attrs["num_halos"] = n_halos
        if n_halos > 0:
            field_data = np.empty(n_halos)
            for key in self.quantities:
                units = ""
                if hasattr(self.catalog[0][key], "units"):
                    units = str(self.catalog[0][key].units)
                for i in range(n_halos):
                    field_data[i] = self.catalog[i][key]
                dataset = out_file.create_dataset(str(key), data=field_data)
                dataset.attrs["units"] = units
        out_file.close()
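
# Usage sketch (illustrative): reading back a catalog written by save_catalog()
# with h5py. Datasets are keyed by quantity name with units in an attribute, as
# in the writer above; the filename and "particle_mass" key are hypothetical.
# >>> import h5py
# >>> with h5py.File("catalog.0.h5", "r") as f:
# ...     n_halos = f.attrs["num_halos"]
# ...     masses = f["particle_mass"][()]
# ...     units = f["particle_mass"].attrs["units"]
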
Example #9
def construct_image(ds, axis, data_source, center, width=None, image_res=None):
    if width is None:
        width = ds.domain_width[axis_wcs[axis]]
        unit = ds.get_smallest_appropriate_unit(width[0])
        mylog.info("Making an image of the entire domain, "+
                   "so setting the center to the domain center.")
    else:
        width = ds.coordinates.sanitize_width(axis, width, None)
        unit = str(width[0].units)
    if image_res is None:
        ddims = ds.domain_dimensions*ds.refine_by**ds.index.max_level
        if iterable(axis):
            nx = ddims.max()
            ny = ddims.max()
        else:
            nx, ny = [ddims[idx] for idx in axis_wcs[axis]]
    else:
        if iterable(image_res):
            nx, ny = image_res
        else:
            nx, ny = image_res, image_res
    dx, dy = width[0]/nx, width[1]/ny
    crpix = [0.5*(nx+1), 0.5*(ny+1)]
    if hasattr(ds, "wcs") and not iterable(axis):
        # This is a FITS dataset, so we use it to construct the WCS
        cunit = [str(ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis]]
        ctype = [ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis]]
        cdelt = [ds.wcs.wcs.cdelt[idx] for idx in axis_wcs[axis]]
        ctr_pix = center.in_units("code_length")[:ds.dimensionality].v
        crval = ds.wcs.wcs_pix2world(ctr_pix.reshape(1, ds.dimensionality))[0]
        crval = [crval[idx] for idx in axis_wcs[axis]]
    else:
        if unit == "unitary":
            unit = ds.get_smallest_appropriate_unit(ds.domain_width.max())
        elif unit == "code_length":
            unit = ds.get_smallest_appropriate_unit(ds.quan(1.0,"code_length"))
        unit = sanitize_fits_unit(unit)
        cunit = [unit]*2
        ctype = ["LINEAR"]*2
        cdelt = [dx.in_units(unit)]*2
        if iterable(axis):
            crval = center.in_units(unit)
        else:
            crval = [center[idx].in_units(unit) for idx in axis_wcs[axis]]
    if hasattr(data_source, 'to_frb'):
        if iterable(axis):
            frb = data_source.to_frb(width[0], (nx, ny), height=width[1])
        else:
            frb = data_source.to_frb(width[0], (nx, ny), center=center, height=width[1])
    else:
        frb = None
    w = pywcs.WCS(naxis=2)
    w.wcs.crpix = crpix
    w.wcs.cdelt = cdelt
    w.wcs.crval = crval
    w.wcs.cunit = cunit
    w.wcs.ctype = ctype
    return w, frb
    def _write_spectrum_fits(self, filename):
        """
        Write spectrum to a fits file.
        """
        mylog.info("Writing spectrum to fits file: %s." % filename)
        col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_bins)
        col2 = pyfits.Column(name='flux', format='E', array=self.flux_field)
        cols = pyfits.ColDefs([col1, col2])
        tbhdu = pyfits.BinTableHDU.from_columns(cols)
        tbhdu.writeto(filename, clobber=True)
    def _write_spectrum_hdf5(self, filename):
        """
        Write spectrum to an hdf5 file.

        """
        mylog.info("Writing spectrum to hdf5 file: %s." % filename)
        output = h5py.File(filename, 'w')
        output.create_dataset('wavelength', data=self.lambda_bins)
        output.create_dataset('tau', data=self.tau_field)
        output.create_dataset('flux', data=self.flux_field)
        output.close()
    def _write_spectrum_ascii(self, filename):
        """
        Write spectrum to an ascii file.
        """
        mylog.info("Writing spectrum to ascii file: %s." % filename)
        f = open(filename, 'w')
        f.write("# wavelength[A] tau flux\n")
        for i in range(self.lambda_bins.size):
            f.write("%e %e %e\n" % (self.lambda_bins[i],
                                    self.tau_field[i], self.flux_field[i]))
        f.close()
    def _write_spectrum_line_list(self, filename):
        """
        Write out list of spectral lines.
        """
        mylog.info("Writing spectral line list: %s." % filename)
        self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
        f = open(filename, 'w')
        f.write('#%-14s %-14s %-12s %-12s %-12s %-12s\n' %
                ('Wavelength', 'Line', 'N [cm^-2]', 'b [km/s]', 'z', 'v_pec [km/s]'))
        for line in self.spectrum_line_list:
            f.write('%-14.6f %-14s %e %e %e %e\n' %
                    (line['wavelength'], line['label'],
                     line['column_density'], line['b_thermal'],
                     line['redshift'], line['v_pec']))
        f.close()
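
# A small dispatcher is a natural companion to the three writers above. This
# is a sketch, not part of the original class: it assumes `spectrum` is an
# instance exposing the _write_spectrum_* methods and routes on the suffix.
import os

def save_spectrum(spectrum, filename):
    """Hypothetical helper: pick a spectrum writer from the file extension."""
    ext = os.path.splitext(filename)[1].lower()
    if ext in (".h5", ".hdf5"):
        spectrum._write_spectrum_hdf5(filename)
    elif ext in (".fits", ".fit"):
        spectrum._write_spectrum_fits(filename)
    else:
        spectrum._write_spectrum_ascii(filename)
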
def _get_data_file(data_file=None):
    if data_file is None:
        data_file = "cloudy_emissivity.h5"
    data_url = "http://yt-project.org/data"
    if "YT_DEST" in os.environ and \
      os.path.isdir(os.path.join(os.environ["YT_DEST"], "data")):
        data_dir = os.path.join(os.environ["YT_DEST"], "data")
    else:
        data_dir = "."
    data_path = os.path.join(data_dir, data_file)
    if not os.path.exists(data_path):
        mylog.info("Attempting to download supplementary data from %s to %s." % 
                   (data_url, data_dir))
        fn = download_file(os.path.join(data_url, data_file), data_path)
        if fn != data_path:
            raise RuntimeError("Failed to download supplementary data.")
    return data_path
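
# Usage sketch (illustrative): resolve, and download if absent, the default
# emissivity table or a specific one (the second filename is hypothetical).
# >>> path = _get_data_file()
# >>> path = _get_data_file(data_file="some_other_table.h5")
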
Example #15
    def _save_light_cone_solution(self, filename="light_cone.dat"):
        "Write out a text file with information on light cone solution."

        mylog.info("Saving light cone solution to %s." % filename)

        f = open(filename, "w")
        f.write("# parameter_filename = %s\n" % self.parameter_filename)
        f.write("\n")
        f.write("# Slice    Dataset    Redshift    depth/box    " + \
                "width/degree    axis    center\n")
        for q, output in enumerate(self.light_cone_solution):
            f.write(("%04d %s %f %f %f %d %f %f %f\n") %
                    (q, output["filename"], output["redshift"],
                     output["box_depth_fraction"], output["box_width_per_angle"],
                     output["projection_axis"], output["projection_center"][0],
                     output["projection_center"][1], output["projection_center"][2]))
        f.close()
Example #16
    def _save_light_cone_stack(self, field, weight_field,
                               pstack, wstack,
                               filename=None, attrs=None):
        "Save the light cone projection stack as a 3d array in and hdf5 file."

        if attrs is None:
            attrs = {}
        
        # Make list of redshifts to include as a dataset attribute.
        redshift_list = np.array([my_slice["redshift"] \
                                 for my_slice in self.light_cone_solution])

        field_node = "%s_%s" % (field, weight_field)
        weight_field_node = "weight_field_%s" % weight_field

        if filename is None:
            filename = os.path.join(self.output_dir, "%s_data" % self.output_prefix)
        if not filename.endswith(".h5"):
            filename += ".h5"

        if pstack.size == 0:
            mylog.info("save_light_cone_stack: light cone projection is empty.")
            return

        mylog.info("Writing light cone data to %s." % filename)

        fh = h5py.File(filename, "a")

        if field_node in fh:
            del fh[field_node]

        mylog.info("Saving %s to %s." % (field_node, filename))
        dataset = fh.create_dataset(field_node, data=pstack)
        dataset.attrs["units"] = str(pstack.units)
        dataset.attrs["redshifts"] = redshift_list
        dataset.attrs["observer_redshift"] = np.float(self.observer_redshift)
        for key, value in attrs.items():
            dataset.attrs[key] = value

        if wstack.size > 0:
            if weight_field_node in fh:
                del fh[weight_field_node]

            mylog.info("Saving %s to %s." % (weight_field_node, filename))
            dataset = fh.create_dataset(weight_field_node,
                                        data=wstack)
            dataset.attrs["units"] = str(wstack.units)
            dataset.attrs["redshifts"] = redshift_list
            dataset.attrs["observer_redshift"] = np.float(self.observer_redshift)
            for key, value in attrs.items():
                dataset.attrs[key] = value

        fh.close()
Example #17
    def _write_light_ray(self, filename, data):
        """
        _write_light_ray(filename, data)

        Write light ray data to hdf5 file.
        """

        mylog.info("Saving light ray data to %s." % filename)
        output = h5py.File(filename, 'w')
        for field in data.keys():
            # if the field is a tuple, only use the second part of the tuple
            # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
            if isinstance(field, tuple):
                fieldname = field[1]
            else:
                fieldname = field
            output.create_dataset(fieldname, data=data[field])
            output[fieldname].attrs["units"] = str(data[field].units)
        output.close()
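
# Usage sketch (illustrative): reading a light-ray file written above. Field
# datasets are named by field with units in an attribute, and "dl" holds the
# path length per ray element; the filename is hypothetical.
# >>> import h5py
# >>> with h5py.File("my_ray.h5", "r") as f:
# ...     dl = f["dl"][()]
# ...     density = f["density"][()]
# ...     units = f["density"].attrs["units"]
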
Example #18
    def _write_light_ray_solution(self, filename, extra_info=None):
        """
        _write_light_ray_solution(filename, extra_info=None)

        Write light ray solution to a file.
        """

        mylog.info("Writing light ray solution to %s." % filename)
        f = open(filename, 'w')
        if extra_info is not None:
            for par, val in extra_info.items():
                f.write("%s = %s\n" % (par, val))
        f.write("\nSegment Redshift dl/box    Start x       y             " + \
                "z             End x         y             z            Dataset\n")
        for q, my_segment in enumerate(self.light_ray_solution):
            f.write("%04d    %.6f %.6f % .10f % .10f % .10f % .10f % .10f % .10f %s\n" % \
                    (q, my_segment['redshift'], my_segment['traversal_box_fraction'],
                     my_segment['start'][0], my_segment['start'][1], my_segment['start'][2],
                     my_segment['end'][0], my_segment['end'][1], my_segment['end'][2],
                     my_segment['filename']))
        f.close()
Example #19
    def _get_owls_ion_data_dir(self):

        txt = "Attempting to download ~ 30 Mb of owls ion data from %s to %s."
        data_file = "owls_ion_data.tar.gz"
        data_url = "http://yt-project.org/data"

        # get test_data_dir from yt config (ytcfg)
        #----------------------------------------------
        tdir = ytcfg.get("yt", "test_data_dir")

        # set download destination to tdir, or ./ if tdir isn't defined
        #----------------------------------------------
        if tdir == "/does/not/exist":
            data_dir = "./"
        else:
            data_dir = tdir


        # check for owls_ion_data directory in data_dir
        # if not there download the tarball and untar it
        #----------------------------------------------
        owls_ion_path = os.path.join( data_dir, "owls_ion_data" )

        if not os.path.exists(owls_ion_path):
            mylog.info(txt % (data_url, data_dir))                    
            fname = os.path.join(data_dir, data_file)
            fn = download_file(os.path.join(data_url, data_file), fname)

            cmnd = "cd " + data_dir + "; " + "tar xf " + data_file
            os.system(cmnd)


        if not os.path.exists(owls_ion_path):
            raise RuntimeError("Failed to download owls ion data.")

        return owls_ion_path
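
# The os.system() tar invocation above can be replaced with the standard
# library's tarfile module; a minimal sketch (hypothetical helper, trusted
# archive assumed):
import os
import tarfile

def untar_ion_data(data_dir, data_file):
    """Extract the downloaded tarball without shelling out."""
    with tarfile.open(os.path.join(data_dir, data_file)) as tar:
        tar.extractall(path=data_dir)
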
Example #20
    def _parse_parameter_file(self):
        """Parse input datfile's header. Apply geometry_override if specified."""
        # required method
        self.unique_identifier = int(
            os.stat(self.parameter_filename)[stat.ST_CTIME])

        # populate self.parameters with header data
        with open(self.parameter_filename, 'rb') as istream:
            self.parameters.update(get_header(istream))

        self.current_time = self.parameters['time']
        self.dimensionality = self.parameters['ndim']

        # force 3D for this definition
        dd = np.ones(3, dtype="int64")
        dd[:self.dimensionality] = self.parameters['domain_nx']
        self.domain_dimensions = dd

        if self.parameters.get("staggered", False):
            mylog.warning(
                "'staggered' flag was found, but is currently ignored (unsupported)"
            )

        # parse geometry
        # by order of decreasing priority, we use
        # - geometry_override
        # - "geometry" parameter from datfile
        # - if all fails, default to "cartesian"
        self.geometry = None
        amrvac_geom = self.parameters.get("geometry", None)
        if amrvac_geom is not None:
            self.geometry = self._parse_geometry(amrvac_geom)
        elif self.parameters["datfile_version"] > 4:
            # py38: walrus here
            mylog.error(
                "No 'geometry' flag found in datfile with version %d >4." %
                self.parameters["datfile_version"])

        if self._geometry_override is not None:
            # py38: walrus here
            try:
                new_geometry = self._parse_geometry(self._geometry_override)
                if new_geometry == self.geometry:
                    mylog.info(
                        "geometry_override is identical to datfile parameter.")
                else:
                    self.geometry = new_geometry
                    mylog.warning(
                        "Overriding geometry, this may lead to surprising results."
                    )
            except ValueError:
                mylog.error(
                    "Unable to parse geometry_override '%s' (will be ignored)."
                    % self._geometry_override)

        if self.geometry is None:
            mylog.warning(
                "No geometry parameter supplied or found, defaulting to cartesian."
            )
            self.geometry = "cartesian"

        # parse periodicity
        per = self.parameters.get("periodic", np.array([False, False, False]))
        missing_dim = 3 - len(per)
        self.periodicity = np.append(per, [False] * missing_dim)

        self.gamma = self.parameters.get("gamma", 5.0 / 3.0)

        # parse domain edges
        dle = np.zeros(3)
        dre = np.ones(3)
        dle[:self.dimensionality] = self.parameters['xmin']
        dre[:self.dimensionality] = self.parameters['xmax']
        self.domain_left_edge = dle
        self.domain_right_edge = dre

        # defaulting to non-cosmological
        self.cosmological_simulation = 0
        self.current_redshift = 0.0
        self.omega_matter = 0.0
        self.omega_lambda = 0.0
        self.hubble_constant = 0.0
Example #21
    def save(self, fname=None, sigma_clip=None):
        r"""Saves the most recently rendered image of the Scene to disk.

        Once you have created a scene and rendered that scene to an image
        array, this saves that image array to disk with an optional filename.
        If an image has not yet been rendered for the current scene object,
        it forces one and writes it out.

        Parameters
        ----------
        fname: string, optional
            If specified, save the rendering to the file "fname".
            If unspecified, it creates a default based on the dataset filename.
            The file format is inferred from the filename's suffix. Supported
            formats are png, pdf, eps, and ps.
            Default: None
        sigma_clip: float, optional
            Image values greater than this number times the standard deviation
            plus the mean of the image will be clipped before saving. Useful
            for enhancing images as it gets rid of rare high pixel values.
            Default: None

            floor(vals > std_dev*sigma_clip + mean)

        Returns
        -------
            Nothing

        Examples
        --------

        >>> import yt
        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
        >>>
        >>> sc = yt.create_scene(ds)
        >>> # Modify camera, sources, etc...
        >>> sc.render()
        >>> sc.save('test.png', sigma_clip=4)

        Or alternatively:

        >>> import yt
        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
        >>>
        >>> sc = yt.create_scene(ds)
        >>> # save with different sigma clipping values
        >>> sc.save('raw.png')
        >>> sc.save('clipped_2.png', sigma_clip=2)
        >>> sc.save('clipped_4.png', sigma_clip=4)

        """
        if fname is None:
            sources = list(self.sources.values())
            rensources = [s for s in sources if isinstance(s, RenderSource)]
            # if a volume source present, use its affiliated ds for fname
            if len(rensources) > 0:
                rs = rensources[0]
                basename = rs.data_source.ds.basename
                if isinstance(rs.field, str):
                    field = rs.field
                else:
                    field = rs.field[-1]
                fname = "%s_Render_%s.png" % (basename, field)
            # if no volume source present, use a default filename
            else:
                fname = "Render_opaque.png"
        suffix = get_image_suffix(fname)
        if suffix == '':
            suffix = '.png'
            fname = '%s%s' % (fname, suffix)

        self.render()

        mylog.info("Saving render %s", fname)
        # We can render pngs natively but for other formats we defer to
        # matplotlib.
        if suffix == '.png':
            self._last_render.write_png(fname, sigma_clip=sigma_clip)
        else:
            from matplotlib.figure import Figure
            from matplotlib.backends.backend_pdf import \
                FigureCanvasPdf
            from matplotlib.backends.backend_ps import \
                FigureCanvasPS
            shape = self._last_render.shape
            fig = Figure((shape[0] / 100., shape[1] / 100.))
            if suffix == '.pdf':
                canvas = FigureCanvasPdf(fig)
            elif suffix in ('.eps', '.ps'):
                canvas = FigureCanvasPS(fig)
            else:
                raise NotImplementedError(
                    "Unknown file suffix '{}'".format(suffix))
            ax = fig.add_axes([0, 0, 1, 1])
            ax.set_axis_off()
            out = self._last_render
            nz = out[:, :, :3][out[:, :, :3].nonzero()]
            if sigma_clip is not None:
                max_val = nz.mean() + sigma_clip * nz.std()
            else:
                max_val = nz.max()
            alpha = (255 * out[:, :, 3]).astype('uint8')
            out = np.clip(out[:, :, :3] / max_val, 0.0, 1.0) * 255
            out = np.concatenate([out.astype('uint8'), alpha[..., None]],
                                 axis=-1)
            # not sure why we need rot90, but this makes the orientation
            # match the png writer
            ax.imshow(np.rot90(out), origin='lower')
            canvas.print_figure(fname, dpi=100)
Example #22
    def _validate_parent_children_relationship(self):
        mylog.info("Validating the parent-children relationship ...")

        father_list = self._handle["Tree/Father"][()]

        for grid in self.grids:
            # parent->children == itself
            if grid.Parent is not None:
                assert (grid in grid.Parent.Children
                        ), "Grid %d, Parent %d, Parent->Children[0] %d" % (
                            grid.id,
                            grid.Parent.id,
                            grid.Parent.Children[0].id,
                        )

            # children->parent == itself
            for c in grid.Children:
                assert c.Parent is grid, "Grid %d, Children %d, Children->Parent %d" % (
                    grid.id,
                    c.id,
                    c.Parent.id,
                )

            # all refinement grids should have parent
            if grid.Level > 0:
                assert (
                    grid.Parent is not None and
                    grid.Parent.id >= 0), "Grid %d, Level %d, Parent %d" % (
                        grid.id,
                        grid.Level,
                        grid.Parent.id if grid.Parent is not None else -999,
                    )

            # parent index is consistent with the loaded dataset
            if grid.Level > 0:
                father_gid = father_list[grid.id * self.pgroup] // self.pgroup
                assert (
                    father_gid == grid.Parent.id
                ), "Grid %d, Level %d, Parent_Found %d, Parent_Expect %d" % (
                    grid.id,
                    grid.Level,
                    grid.Parent.id,
                    father_gid,
                )

            # edges between children and parent
            for c in grid.Children:
                for d in range(0, 3):
                    msgL = (
                        "Grid %d, Child %d, Grid->EdgeL %14.7e, Children->EdgeL %14.7e"
                        % (grid.id, c.id, grid.LeftEdge[d], c.LeftEdge[d]))
                    msgR = (
                        "Grid %d, Child %d, Grid->EdgeR %14.7e, Children->EdgeR %14.7e"
                        % (grid.id, c.id, grid.RightEdge[d], c.RightEdge[d]))
                    if not grid.LeftEdge[d] <= c.LeftEdge[d]:
                        raise ValueError(msgL)

                    if not grid.RightEdge[d] >= c.RightEdge[d]:
                        raise ValueError(msgR)

        mylog.info("Check passed")
Example #23
    def __init__(self, filename, skip_hash_check=False):
        self._ds = None
        self.data_file = os.path.abspath(filename)
        self.skip_hash_check = skip_hash_check

        self._halo_dmlist = LazyDataset(self, 'halo_data/lists/dmlist')
        self._halo_slist = LazyDataset(self, 'halo_data/lists/slist')
        self._halo_glist = LazyDataset(self, 'halo_data/lists/glist')
        self._halo_bhlist = LazyDataset(self, 'halo_data/lists/bhlist')
        self._halo_dlist = LazyDataset(self, 'halo_data/lists/dlist')

        self._galaxy_slist = LazyDataset(self, 'galaxy_data/lists/slist')
        self._galaxy_glist = LazyDataset(self, 'galaxy_data/lists/glist')
        self._galaxy_bhlist = LazyDataset(self, 'galaxy_data/lists/bhlist')
        self._galaxy_dlist = LazyDataset(self, 'galaxy_data/lists/dlist')

        self._cloud_glist = LazyDataset(self, 'cloud_data/lists/glist')
        self._cloud_dlist = LazyDataset(self, 'cloud_data/lists/dlist')

        with h5py.File(filename, 'r') as hd:
            mylog.info('Opening {}'.format(filename))

            if 'hash' in hd.attrs:
                self.hash = hd.attrs['hash']
            else:
                self.hash = None
            if isinstance(self.hash, np.bytes_):
                self.hash = self.hash.decode('utf8')

            # This should probably be caesar_version or something
            self.caesar = hd.attrs['caesar']

            self.unit_registry = UnitRegistry.from_json(
                hd.attrs['unit_registry_json'].decode('utf8'))

            # Load the information about the simulation itself
            self.simulation = SimulationAttributes()
            self.simulation._unpack(self, hd)

            # Halo data is loaded unconditionally, AFAICT it's always present
            self._galaxy_index_list = None
            if 'halo_data/lists/galaxy_index_list' in hd:
                self._galaxy_index_list = LazyDataset(
                    self, 'halo_data/lists/galaxy_index_list')

            self._halo_data = {}
            for k, v in hd['halo_data'].items():
                if type(v) is h5py.Dataset:
                    self._halo_data[k] = LazyDataset(self, 'halo_data/' + k)

            self._halo_dicts = defaultdict(dict)
            for k in hd['halo_data/dicts']:
                dictname, arrname = k.split('.')
                self._halo_dicts[dictname][arrname] = LazyDataset(
                    self, 'halo_data/dicts/' + k)

            self.nhalos = hd.attrs['nhalos']
            self.halos = LazyList(self.nhalos, lambda i: Halo(self, i))
            mylog.info('Found {} halos'.format(len(self.halos)))

            # Provide default values for everything, so that if a simulation
            # without galaxies is loaded we get zero galaxies, not AttributeErrors
            self._galaxy_data = {}
            self._galaxy_dicts = defaultdict(dict)
            self.ngalaxies = 0
            self.galaxies = LazyList(self.ngalaxies, lambda i: Galaxy(self, i))
            if 'galaxy_data' in hd:
                self._cloud_index_list = None
                if 'galaxy_data/lists/cloud_index_list' in hd:
                    self._cloud_index_list = LazyDataset(
                        self, 'galaxy_data/lists/cloud_index_list')

                if 'tree_data/progen_galaxy_star' in hd:
                    self._galaxy_data['progen_galaxy_star'] = self._progen_galaxy_star = LazyDataset(
                        self, 'tree_data/progen_galaxy_star')
                    
                if 'tree_data/descend_galaxy_star' in hd:
                    self._galaxy_data['descend_galaxy_star'] = self._descend_galaxy_star = LazyDataset(
                        self, 'tree_data/descend_galaxy_star')

                for k, v in hd['galaxy_data'].items():
                    if type(v) is h5py.Dataset:
                        self._galaxy_data[k] = LazyDataset(
                            self, 'galaxy_data/' + k)

                for k in hd['galaxy_data/dicts']:
                    dictname, arrname = k.split('.')
                    self._galaxy_dicts[dictname][arrname] = LazyDataset(
                        self, 'galaxy_data/dicts/' + k)

                self.ngalaxies = hd.attrs['ngalaxies']
                self.galaxies = LazyList(self.ngalaxies,
                                         lambda i: Galaxy(self, i))
                mylog.info('Found {} galaxies'.format(len(self.galaxies)))

            self._cloud_data = {}
            self._cloud_dicts = defaultdict(dict)
            self.nclouds = 0
            self.clouds = LazyList(self.nclouds, lambda i: Cloud(self, i))
            if 'cloud_data' in hd:
                for k, v in hd['cloud_data'].items():
                    if type(v) is h5py.Dataset:
                        self._cloud_data[k] = LazyDataset(
                            self, 'cloud_data/' + k)

                for k in hd['cloud_data/dicts']:
                    dictname, arrname = k.split('.')
                    self._cloud_dicts[dictname][arrname] = LazyDataset(
                        self, 'cloud_data/dicts/' + k)

                self.nclouds = hd.attrs['nclouds']
                self.clouds = LazyList(self.nclouds, lambda i: Cloud(self, i))
                mylog.info('Found {} clouds'.format(len(self.clouds)))
Example #24
def _future_bound(
        clump,
        use_thermal_energy=True,
        truncate=True,
        include_cooling=True):
    """
    True if clump has negative total energy. This considers gas kinetic
    energy, thermal energy, and radiative losses over a free-fall time
    against gravitational potential energy of the gas and collisionless
    particle system.
    """

    num_threads = int(os.environ.get('OMP_NUM_THREADS', 1))

    if clump["gas", "cell_mass"].size <= 1:
        mylog.info("Clump has only one cell.")
        return False

    bulk_velocity = clump.quantities.bulk_velocity(
        use_particles=False)

    kinetic = 0.5 * (clump["gas", "cell_mass"] *
        ((bulk_velocity[0] - clump["gas", "velocity_x"])**2 +
         (bulk_velocity[1] - clump["gas", "velocity_y"])**2 +
         (bulk_velocity[2] - clump["gas", "velocity_z"])**2)).sum()

    mylog.info("Kinetic energy: %e erg." %
               kinetic.in_units("erg"))

    if use_thermal_energy:
        cooling_loss = clump.data.ds.quan(0.0, "erg")
        thermal = (clump["gas", "cell_mass"] *
                   clump["gas", "thermal_energy"]).sum()
        mylog.info("Thermal energy: %e erg." %
                   thermal.in_units("erg"))

        if include_cooling:
            # divide by sqrt(2) since t_ff = t_dyn / sqrt(2)
            cooling_loss = \
                (clump["gas", "cell_mass"] *
                 clump["gas", "dynamical_time"] *
                 clump["gas", "thermal_energy"] /
                 clump["gas", "cooling_time"]).sum() / np.sqrt(2)
            mylog.info("Cooling loss: %e erg." %
                       cooling_loss.in_units("erg"))

        thermal -= np.abs(cooling_loss)
        kinetic += thermal
        kinetic = max(kinetic, clump.data.ds.quan(0.0, "erg"))

    mylog.info("Available energy: %e erg." %
               kinetic.in_units("erg"))

    m = np.concatenate([clump["gas", "cell_mass"].in_cgs(),
                        clump["all", "particle_mass"].in_cgs()])
    px = np.concatenate([clump["index", "x"].in_cgs(),
                         clump["all", "particle_position_x"].in_cgs()])
    py = np.concatenate([clump["index", "y"].in_cgs(),
                         clump["all", "particle_position_y"].in_cgs()])
    pz = np.concatenate([clump["index", "z"].in_cgs(),
                         clump["all", "particle_position_z"].in_cgs()])

    potential = clump.data.ds.quan(
        G * gravitational_binding_energy(
            m, px, py, pz,
            truncate, (kinetic / G).in_cgs(),
            num_threads=num_threads),
        kinetic.in_cgs().units)

    mylog.info("Potential energy: %e erg." %
               potential.to('erg'))

    return potential >= kinetic
Example #25
    def __init__(self, data, fields=None, units=None, width=None, wcs=None):
        r""" Initialize a FITSImageData object.

        FITSImageData contains a collection of FITS ImageHDU instances and
        WCS information, along with units for each of the images. FITSImageData
        instances can be constructed from ImageArrays, NumPy arrays, dicts 
        of such arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter 
        two are the most powerful because WCS information can be constructed 
        automatically from their coordinates.

        Parameters
        ----------
        data : FixedResolutionBuffer, YTCoveringGrid, ImageArray,
            numpy.ndarray, or dict of such arrays
            The data to be made into a FITS image or images.
        fields : single string or list of strings, optional
            The field names for the data. If *fields* is None and *data* has
            keys, it will use these for the fields. If *data* is just a
            single array, one field name must be specified.
        units : string
            The units of the WCS coordinates. Defaults to "cm".
        width : float or YTQuantity
            The width of the image. Either a single value or iterable of values.
            If a float, assumed to be in *units*. Only used if this information 
            is not already provided by *data*.
        wcs : `astropy.wcs.WCS` instance, optional
            Supply an AstroPy WCS instance. Will override automatic WCS
            creation from FixedResolutionBuffers and YTCoveringGrids.

        Examples
        --------

        >>> # This example uses a FRB.
        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
        >>> prj = ds.proj(2, "kT", weight_field="density")
        >>> frb = prj.to_frb((0.5, "Mpc"), 800)
        >>> # This example just uses the FRB and puts the coords in kpc.
        >>> f_kpc = FITSImageData(frb, fields="kT", units="kpc")
        >>> # This example specifies a specific WCS.
        >>> from astropy.wcs import WCS
        >>> w = WCS(naxis=self.dimensionality)
        >>> w.wcs.crval = [30., 45.] # RA, Dec in degrees
        >>> w.wcs.cunit = ["deg"]*2
        >>> nx, ny = 800, 800
        >>> w.wcs.crpix = [0.5*(nx+1), 0.5*(ny+1)]
        >>> w.wcs.ctype = ["RA---TAN","DEC--TAN"]
        >>> scale = 1./3600. # One arcsec per pixel
        >>> w.wcs.cdelt = [-scale, scale]
        >>> f_deg = FITSImageData(frb, fields="kT", wcs=w)
        >>> f_deg.writeto("temp.fits")
        """

        if units is None:
            units = "cm"
        if width is None:
            width = 1.0

        exclude_fields = ['x','y','z','px','py','pz',
                          'pdx','pdy','pdz','weight_field']

        super(FITSImageData, self).__init__()

        if isinstance(fields, string_types):
            fields = [fields]

        if hasattr(data, 'keys'):
            img_data = data
            if fields is None:
                fields = list(img_data.keys())
        elif isinstance(data, np.ndarray):
            if fields is None:
                mylog.warning("No field name given for this array. Calling it 'image_data'.")
                fn = 'image_data'
                fields = [fn]
            else:
                fn = fields[0]
            img_data = {fn: data}

        self.fields = []
        for fd in fields:
            if isinstance(fd, tuple):
                self.fields.append(fd[1])
            else:
                self.fields.append(fd)

        first = True
        self.field_units = {}
        for key in fields:
            if key not in exclude_fields:
                if hasattr(img_data[key], "units"):
                    self.field_units[key] = img_data[key].units
                else:
                    self.field_units[key] = "dimensionless"
                mylog.info("Making a FITS image of field %s" % key)
                if first:
                    hdu = pyfits.PrimaryHDU(np.array(img_data[key]))
                    first = False
                else:
                    hdu = pyfits.ImageHDU(np.array(img_data[key]))
                hdu.name = key
                hdu.header["btype"] = key
                if hasattr(img_data[key], "units"):
                    hdu.header["bunit"] = re.sub('()', '', str(img_data[key].units))
                self.append(hdu)

        self.shape = self[0].shape
        self.dimensionality = len(self.shape)

        if wcs is None:
            w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
            if isinstance(img_data, FixedResolutionBuffer):
                # FRBs are a special case where we have coordinate
                # information, so we take advantage of this and
                # construct the WCS object
                dx = (img_data.bounds[1]-img_data.bounds[0]).in_units(units).v/self.shape[0]
                dy = (img_data.bounds[3]-img_data.bounds[2]).in_units(units).v/self.shape[1]
                xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).in_units(units).v
                yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).in_units(units).v
                center = [xctr, yctr]
                cdelt = [dx,dy]
            elif isinstance(img_data, YTCoveringGridBase):
                cdelt = img_data.dds.in_units(units).v
                center = 0.5*(img_data.left_edge+img_data.right_edge).in_units(units).v
            else:
                # If img_data is just an array, we assume the center is the origin
                # and use the image width to determine the cell widths
                if not iterable(width):
                    width = [width]*self.dimensionality
                if isinstance(width[0], YTQuantity):
                    cdelt = [wh.in_units(units).v/n for wh, n in zip(width, self.shape)]
                else:
                    cdelt = [float(wh)/n for wh, n in zip(width, self.shape)]
                center = [0.0]*self.dimensionality
            w.wcs.crpix = 0.5*(np.array(self.shape)+1)
            w.wcs.crval = center
            w.wcs.cdelt = cdelt
            w.wcs.ctype = ["linear"]*self.dimensionality
            w.wcs.cunit = [units]*self.dimensionality
            self.set_wcs(w)
        else:
            self.set_wcs(wcs)
Example #26
    def calculate_light_cone_solution(self, seed=None, filename=None):
        r"""Create list of projections to be added together to make the light cone.

        Parameters
        ----------
        seed : int
            The seed for the random number generator.  Any light cone solution
            can be reproduced by giving the same random seed.  Default: None
            (each solution will be distinct).
        filename : string
            If given, a text file detailing the solution will be written out.
            Default: None.

        """

        # Don"t use box coherence with maximum projection depths.
        if self.use_minimum_datasets and \
                self.minimum_coherent_box_fraction > 0:
            mylog.info("Setting minimum_coherent_box_fraction to 0 with " +
                       "minimal light cone.")
            self.minimum_coherent_box_fraction = 0

        # Calculate projection sizes, and get
        # random projection axes and centers.
        if seed is not None:
            seed = int(seed)
        np.random.seed(seed)

        # For box coherence, keep track of effective depth travelled.
        box_fraction_used = 0.0

        for q in range(len(self.light_cone_solution)):
            if "previous" in self.light_cone_solution[q]:
                del self.light_cone_solution[q]["previous"]
            if "next" in self.light_cone_solution[q]:
                del self.light_cone_solution[q]["next"]
            if q == len(self.light_cone_solution) - 1:
                z_next = self.near_redshift
            else:
                z_next = self.light_cone_solution[q+1]["redshift"]

            # Calculate fraction of box required for a depth of delta z
            self.light_cone_solution[q]["box_depth_fraction"] = \
                (self.cosmology.comoving_radial_distance(z_next, \
                        self.light_cone_solution[q]["redshift"]) / \
                        self.simulation.box_size).in_units("")

            # Calculate fraction of box required for width corresponding to
            # requested image size.
            proper_box_size = self.simulation.box_size / \
              (1.0 + self.light_cone_solution[q]["redshift"])
            self.light_cone_solution[q]["box_width_per_angle"] = \
              (self.cosmology.angular_scale(self.observer_redshift,
               self.light_cone_solution[q]["redshift"]) /
               proper_box_size).in_units("1 / degree")

            # Simple error check to make sure more than 100% of box depth
            # is never required.
            if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
                mylog.error(("Warning: box fraction required to go from " +
                             "z = %f to %f is %f") %
                            (self.light_cone_solution[q]["redshift"], z_next,
                             self.light_cone_solution[q]["box_depth_fraction"]))
                mylog.error(("Full box delta z is %f, but it is %f to the " +
                             "next data dump.") %
                            (self.light_cone_solution[q]["dz_max"],
                             self.light_cone_solution[q]["redshift"]-z_next))

            # Get projection axis and center.
            # If using box coherence, only get random axis and center if enough
            # of the box has been used, or if box_fraction_used will be greater
            # than 1 after this slice.
            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
              (box_fraction_used > self.minimum_coherent_box_fraction) or \
              (box_fraction_used +
               self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
                # Random axis and center.
                self.light_cone_solution[q]["projection_axis"] = \
                  np.random.randint(0, 3)
                self.light_cone_solution[q]["projection_center"] = \
                  np.random.random(3)
                box_fraction_used = 0.0
            else:
                # Same axis and center as previous slice,
                # but with depth center shifted.
                self.light_cone_solution[q]["projection_axis"] = \
                  self.light_cone_solution[q-1]["projection_axis"]
                self.light_cone_solution[q]["projection_center"] = \
                  self.light_cone_solution[q-1]["projection_center"].copy()
                self.light_cone_solution[q]["projection_center"]\
                  [self.light_cone_solution[q]["projection_axis"]] += \
                    0.5 * (self.light_cone_solution[q]["box_depth_fraction"] +
                           self.light_cone_solution[q-1]["box_depth_fraction"])
                if self.light_cone_solution[q]["projection_center"]\
                  [self.light_cone_solution[q]["projection_axis"]] >= 1.0:
                    self.light_cone_solution[q]["projection_center"]\
                      [self.light_cone_solution[q]["projection_axis"]] -= 1.0

            box_fraction_used += self.light_cone_solution[q]["box_depth_fraction"]

        # Write solution to a file.
        if filename is not None:
            self._save_light_cone_solution(filename=filename)
Example #27
    def _parse_parameter_file(self):
        # required method
        self.unique_identifier = int(
            os.stat(self.parameter_filename)[stat.ST_CTIME])

        # populate self.parameters with header data
        with open(self.parameter_filename, 'rb') as istream:
            self.parameters.update(get_header(istream))

        self.current_time = self.parameters['time']
        self.dimensionality = self.parameters['ndim']

        # force 3D for this definition
        dd = np.ones(3, dtype="int64")
        dd[:self.dimensionality] = self.parameters['domain_nx']
        self.domain_dimensions = dd

        # the following parameters may not be present in the datfile,
        # depending on format version
        if self.parameters["datfile_version"] < 5:
            mylog.warning(
                "This data format does not contain geometry or periodicity info"
            )
        if self.parameters.get("staggered", False):
            mylog.warning(
                "'staggered' flag was found, but is currently ignored (unsupported)"
            )

        # parse geometry
        # by order of decreasing priority, we use
        # - geometry_override
        # - "geometry" parameter from datfile
        # - if all fails, default to "cartesian"
        geom_candidates = {"param": None, "override": None}
        amrvac_geom = self.parameters.get("geometry", None)
        if amrvac_geom is None:
            mylog.warning(
                "Could not find a 'geometry' parameter in source file.")
        else:
            geom_candidates.update({"param": self.parse_geometry(amrvac_geom)})

        if self._geometry_override is not None:
            try:
                geom_candidates.update(
                    {"override": self.parse_geometry(self._geometry_override)})
            except ValueError:
                mylog.error(
                    "Unknown value for geometry_override (will be ignored).")

        if geom_candidates["override"] is not None:
            mylog.warning(
                "Using override geometry, this may lead to surprising results for inappropriate values."
            )
            self.geometry = geom_candidates["override"]
        elif geom_candidates["param"] is not None:
            mylog.info("Using parameter geometry")
            self.geometry = geom_candidates["param"]
        else:
            mylog.warning(
                "No geometry parameter supplied or found, defaulting to cartesian."
            )
            self.geometry = "cartesian"

        # parse periodicity
        per = self.parameters.get("periodic", np.array([False, False, False]))
        missing_dim = 3 - len(per)
        self.periodicity = np.append(per, [False] * missing_dim)

        self.gamma = self.parameters.get("gamma", 5.0 / 3.0)

        # parse domain edges
        dle = np.zeros(3)
        dre = np.ones(3)
        dle[:self.dimensionality] = self.parameters['xmin']
        dre[:self.dimensionality] = self.parameters['xmax']
        self.domain_left_edge = dle
        self.domain_right_edge = dre

        # defaulting to non-cosmological
        self.cosmological_simulation = 0
        self.current_redshift = 0.0
        self.omega_matter = 0.0
        self.omega_lambda = 0.0
        self.hubble_constant = 0.0
Example #28
    def make_light_ray(self, seed=None,
                       start_position=None, end_position=None,
                       trajectory=None,
                       fields=None, setup_function=None,
                       solution_filename=None, data_filename=None,
                       get_los_velocity=True, redshift=None,
                       njobs=-1):
        """
        make_light_ray(seed=None, start_position=None, end_position=None,
                       trajectory=None, fields=None, setup_function=None,
                       solution_filename=None, data_filename=None,
                       get_los_velocity=True, redshift=None,
                       njobs=-1)

        Create a light ray and get field values for each lixel.  A light
        ray consists of a list of field values for cells intersected by
        the ray and the path length of the ray through those cells.
        Light ray data can be written out to an hdf5 file.

        Parameters
        ----------
        seed : optional, int
            Seed for the random number generator.
            Default: None.
        start_position : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The coordinates of the starting position of the ray.
            Default: None.
        end_position : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The coordinates of the ending position of the ray.
            Default: None.
        trajectory : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The (r, theta, phi) direction of the light ray.  Use either 
            end_position or trajectory, not both.
            Default: None.
        fields : optional, list
            A list of fields for which to get data.
            Default: None.
        setup_function : optional, callable, accepts a ds
            This function will be called on each dataset that is loaded 
            to create the light ray.  For, example, this can be used to 
            add new derived fields.
            Default: None.
        solution_filename : optional, string
            Path to a text file where the trajectories of each
            subray are written out.
            Default: None.
        data_filename : optional, string
            Path to output file for ray data.
            Default: None.
        get_los_velocity : optional, bool
            If True, the line of sight velocity is calculated for
            each point in the ray.
            Default: True.
        redshift : optional, float
            Used with light rays made from single datasets to specify a 
            starting redshift for the ray.  If not used, the starting 
            redshift will be 0 for a non-cosmological dataset and 
            the dataset redshift for a cosmological dataset.
            Default: None.
        njobs : optional, int
            The number of parallel jobs over which the segments will 
            be split.  Choose -1 for one processor per segment.
            Default: -1.

        Examples
        --------

        Make a light ray from multiple datasets:
        
        >>> import yt
        >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
        ...     LightRay
        >>> my_ray = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
        ...                   0., 0.1, time_data=False)
        ...
        >>> my_ray.make_light_ray(seed=12345,
        ...                       solution_filename="solution.txt",
        ...                       data_filename="my_ray.h5",
        ...                       fields=["temperature", "density"],
        ...                       get_los_velocity=True)

        Make a light ray from a single dataset:

        >>> import yt
        >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
        ...     LightRay
        >>> my_ray = LightRay("IsolatedGalaxy/galaxy0030/galaxy0030")
        ...
        >>> my_ray.make_light_ray(start_position=[0., 0., 0.],
        ...                       end_position=[1., 1., 1.],
        ...                       solution_filename="solution.txt",
        ...                       data_filename="my_ray.h5",
        ...                       fields=["temperature", "density"],
        ...                       get_los_velocity=True)
        
        """

        # Calculate solution.
        self._calculate_light_ray_solution(seed=seed, 
                                           start_position=start_position, 
                                           end_position=end_position,
                                           trajectory=trajectory,
                                           filename=solution_filename)

        # Initialize data structures.
        self._data = {}
        if fields is None: fields = []
        data_fields = fields[:]
        all_fields = fields[:]
        all_fields.extend(['dl', 'dredshift', 'redshift'])
        if get_los_velocity:
            all_fields.extend(['velocity_x', 'velocity_y',
                               'velocity_z', 'velocity_los'])
            data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])

        all_ray_storage = {}
        for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                       storage=all_ray_storage,
                                                       njobs=njobs):

            # Load dataset for segment.
            ds = load(my_segment['filename'], **self.load_kwargs)

            my_segment['unique_identifier'] = ds.unique_identifier
            if redshift is not None:
                if ds.cosmological_simulation and redshift != ds.current_redshift:
                    mylog.warning("Generating light ray with different "
                                  "redshift than the dataset itself.")
                my_segment["redshift"] = redshift

            if setup_function is not None:
                setup_function(ds)

            if start_position is not None:
                my_segment["start"] = ds.arr(my_segment["start"], "code_length")
                my_segment["end"] = ds.arr(my_segment["end"], "code_length")
            else:
                my_segment["start"] = ds.domain_width * my_segment["start"] + \
                  ds.domain_left_edge
                my_segment["end"] = ds.domain_width * my_segment["end"] + \
                  ds.domain_left_edge

            if not ds.cosmological_simulation:
                next_redshift = my_segment["redshift"]
            elif self.near_redshift == self.far_redshift:
                next_redshift = my_segment["redshift"] - \
                  self._deltaz_forward(my_segment["redshift"], 
                                       ds.domain_width[0].in_units("Mpccm / h") *
                                       my_segment["traversal_box_fraction"])
            elif my_segment.get("next", None) is None:
                next_redshift = self.near_redshift
            else:
                next_redshift = my_segment['next']['redshift']

            mylog.info("Getting segment at z = %s: %s to %s." %
                       (my_segment['redshift'], my_segment['start'],
                        my_segment['end']))

            # Break periodic ray into non-periodic segments.
            sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
                                        left=ds.domain_left_edge,
                                        right=ds.domain_right_edge)

            # Prepare data structure for subsegment.
            sub_data = {}
            sub_data['segment_redshift'] = my_segment['redshift']
            for field in all_fields:
                sub_data[field] = []

            # Get data for all subsegments in segment.
            for sub_segment in sub_segments:
                mylog.info("Getting subsegment: %s to %s." %
                           (list(sub_segment[0]), list(sub_segment[1])))
                sub_ray = ds.ray(sub_segment[0], sub_segment[1])
                asort = np.argsort(sub_ray["t"])
                sub_data['dl'].extend(sub_ray['dts'][asort] *
                                      vector_length(sub_ray.start_point,
                                                    sub_ray.end_point))
                for field in data_fields:
                    sub_data[field].extend(sub_ray[field][asort])

                if get_los_velocity:
                    line_of_sight = sub_segment[1] - sub_segment[0]
                    line_of_sight /= ((line_of_sight**2).sum())**0.5
                    sub_vel = ds.arr([sub_ray['velocity_x'],
                                      sub_ray['velocity_y'],
                                      sub_ray['velocity_z']])
                    sub_data['velocity_los'].extend((np.rollaxis(sub_vel, 1) *
                                                     line_of_sight).sum(axis=1)[asort])
                    del sub_vel

                sub_ray.clear_data()
                del sub_ray, asort

            for key in sub_data:
                sub_data[key] = ds.arr(sub_data[key]).in_cgs()

            # Get redshift for each lixel.  Assume linear relation between l and z.
            sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
                (sub_data['dl'] / vector_length(my_segment['start'], 
                                                my_segment['end']).in_cgs())
            sub_data['redshift'] = my_segment['redshift'] - \
              sub_data['dredshift'].cumsum() + sub_data['dredshift']

            # Remove empty lixels.
            sub_dl_nonzero = sub_data['dl'].nonzero()
            for field in all_fields:
                sub_data[field] = sub_data[field][sub_dl_nonzero]
            del sub_dl_nonzero

            # Add to storage.
            my_storage.result = sub_data

            del ds

        # Reconstruct ray data from parallel_objects storage.
        all_data = [my_data for my_data in all_ray_storage.values()]
        # This is now a list of segments where each one is a dictionary
        # with all the fields.
        all_data.sort(key=lambda a:a['segment_redshift'], reverse=True)
        # Flatten the list into a single dictionary containing fields
        # for the whole ray.
        all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])

        if data_filename is not None:
            self._write_light_ray(data_filename, all_data)

        self._data = all_data
        return all_data
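
A quick way to sanity-check the saved ray is to open the HDF5 output directly.
This is a minimal sketch, assuming the data_filename="my_ray.h5" from the
docstring examples and that _write_light_ray stores one dataset per field at
the file root (the exact layout may vary between yt versions):

import h5py

with h5py.File("my_ray.h5", "r") as f:
    print(list(f.keys()))   # e.g. ['density', 'dl', 'redshift', ...]
    z = f["redshift"][:]    # redshift of each lixel
    dl = f["dl"][:]         # path length of each lixel, in cgs
    print("ray spans z = %f down to %f" % (z.max(), z.min()))
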
Example No. 29
def write_projection(data,
                     filename,
                     colorbar=True,
                     colorbar_label=None,
                     title=None,
                     limits=None,
                     take_log=True,
                     figsize=(8, 6),
                     dpi=100,
                     cmap_name=None,
                     extent=None,
                     xlabel=None,
                     ylabel=None):
    r"""Write a projection or volume rendering to disk with a variety of 
    pretty parameters such as limits, title, colorbar, etc.  write_projection
    uses the standard matplotlib interface to create the figure.  N.B. This code
    only works *after* you have created the projection using the standard 
    framework (i.e. the Camera interface or off_axis_projection).

    Accepts an NxM sized array representing the projection itself as well
    as the filename to which you will save this figure.  Note that the final
    resolution of your image in pixels is the product of figsize (in inches)
    and dpi.

    Parameters
    ----------
    data : array_like 
        image array as output by off_axis_projection or camera.snapshot()
    filename : string 
        the filename where the data will be saved
    colorbar : boolean
        do you want a colorbar generated to the right of the image?
    colorbar_label : string
        the label associated with your colorbar
    title : string
        the label at the top of the figure
    limits : 2-element array_like
        the lower limit and the upper limit to be plotted in the figure 
        of the data array
    take_log : boolean
        plot the log of the data array (and take the log of the limits if set)?
    figsize : array_like
        width, height in inches of final image
    dpi : int
        final image resolution in pixels / inch
    cmap_name : string
        The name of the colormap. Defaults to the yt configuration
        option "default_colormap".
    extent : 4-element array_like, optional
        The (left, right, bottom, top) coordinates of the image, passed
        through to matplotlib's imshow. If None, the axis ticks are hidden.
    xlabel : string, optional
        The label for the x axis.
    ylabel : string, optional
        The label for the y axis.

    Examples
    --------

    >>> image = off_axis_projection(ds, c, L, W, N, "Density", no_ghost=False)
    >>> write_projection(image, 'test.png', 
                         colorbar_label="Column Density (cm$^{-2}$)", 
                         title="Offaxis Projection", limits=(1e-5,1e-3), 
                         take_log=True)
    """
    if cmap_name is None:
        cmap_name = ytcfg.get("yt", "default_colormap")
    import matplotlib.figure
    import matplotlib.colors
    from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS

    # If this is rendered as log, then apply now.
    if take_log:
        norm = matplotlib.colors.LogNorm()
    else:
        norm = matplotlib.colors.Normalize()

    if limits is None:
        limits = [None, None]

    # Create the figure and paint the data on
    fig = matplotlib.figure.Figure(figsize=figsize)
    ax = fig.add_subplot(111)

    cax = ax.imshow(data.to_ndarray(),
                    vmin=limits[0],
                    vmax=limits[1],
                    norm=norm,
                    extent=extent,
                    cmap=cmap_name)

    if title:
        ax.set_title(title)

    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)

    # Suppress the x and y pixel counts
    if extent is None:
        ax.set_xticks(())
        ax.set_yticks(())

    # Add a color bar and label if requested
    if colorbar:
        cbar = fig.colorbar(cax)
        if colorbar_label:
            cbar.ax.set_ylabel(colorbar_label)

    fig.tight_layout()

    suffix = get_image_suffix(filename)

    if suffix == '':
        suffix = '.png'
        filename = "%s%s" % (filename, suffix)
    mylog.info("Saving plot %s", filename)
    if suffix == ".png":
        canvas = FigureCanvasAgg(fig)
    elif suffix == ".pdf":
        canvas = FigureCanvasPdf(fig)
    elif suffix in (".eps", ".ps"):
        canvas = FigureCanvasPS(fig)
    else:
        mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
        canvas = FigureCanvasAgg(fig)

    canvas.print_figure(filename, dpi=dpi)
    return filename
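
As the resolution note in the docstring implies, the pixel dimensions of the
saved image are simply figsize (in inches) multiplied by dpi:

write_projection(image, "proj.png", figsize=(8, 6), dpi=100)         # 800x600
write_projection(image, "proj_hires.png", figsize=(8, 6), dpi=200)   # 1600x1200
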
Example No. 30
                        'd'), ('nstep', 2, 'i'), ('stat', 3, 'd'),
                ('cosm', 7, 'd'), ('timing', 5, 'd'), ('mass_sph', 1, 'd'))
    yield next_set


field_aliases = {
    'standard_five':
    ('Density', 'x-velocity', 'y-velocity', 'z-velocity', 'Pressure'),
    'standard_six': ('Density', 'x-velocity', 'y-velocity', 'z-velocity',
                     'Pressure', 'Metallicity'),
}

particle_families = {
    'DM': 1,
    'star': 2,
    'cloud': 3,
    'dust': 4,
    'star_tracer': -2,
    'cloud_tracer': -3,
    'dust_tracer': -4,
    'gas_tracer': 0
}

if ytcfg.has_section('ramses-families'):
    for key in particle_families.keys():
        val = ytcfg.getint('ramses-families', key, fallback=None)
        if val is not None:
            mylog.info('Changing family %s from %s to %s' %
                       (key, particle_families[key], val))
            particle_families[key] = val
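
Families can therefore be remapped without touching the code. A sketch of the
corresponding configuration snippet (the section name comes from the check
above; the config file location, e.g. ~/.config/yt/ytrc, depends on your yt
version):

[ramses-families]
star = 5
dust = 6
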
Example No. 31
def _render_opengl(data_source,
                   field=None,
                   window_size=None,
                   cam_position=None,
                   cam_focus=None):
    '''High level wrapper for Interactive Data Visualization
    
    Parameters
    ----------
    data_source : :class:`yt.data_objects.data_containers.AMR3DData`,
                  :class:`yt.data_objects.static_output.Dataset`
        This is the source to be rendered, which can be any arbitrary yt
        3D object
    field : string, tuple, optional
        The field to be rendered. If unspecified, this will use the
        default_field for your dataset's frontend--usually ('gas', 'density').
    window_size : 2 element tuple of ints
        The width and the height of the Interactive Data Visualization window.
        For performance reasons it is recommended to use values that are natural
        powers of 2.
    cam_position : 3 element YTArray, optional
        The camera position in physical coordinates. If unspecified,
        data_source's domain right edge will be used.
    cam_focus: 3 element YTArray, optional
        The focus defines the point the camera is pointed at. If unspecified,
        data_source's domain center will be used.

    Examples
    --------

    >>> import yt
    >>> ds = yt.load("Enzo_64/DD0046/DD0046")
    >>> yt.interactive_render(ds)

    '''

    try:
        import cyglfw3  # NOQA
        import OpenGL.GL  # NOQA
    except ImportError:
        raise ImportError(
            "This functionality requires the cyglfw3 and PyOpenGL "
            "packages to be installed.")

    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera, \
        MeshSceneComponent
    from .interactive_loop import RenderingContext

    if isinstance(data_source, Dataset):
        dobj = data_source.all_data()
    else:
        dobj = data_source
    if field is None:
        field = dobj.ds.default_field
        if field not in dobj.ds.derived_field_list:
            raise YTSceneFieldNotFound("""Could not find field '%s' in %s.
                  Please specify a field in interactive_render()""" %
                                       (field, dobj.ds))
        mylog.info('Setting default field to %s' % repr(field))
    if window_size is None:
        window_size = (1024, 1024)
    if cam_position is None:
        cam_position = dobj.ds.domain_right_edge
        if hasattr(dobj.ds.index, "meshes"):
            # unstructured mesh datasets tend to have tight
            # domain boundaries, do some extra padding here.
            cam_position = 3.0 * dobj.ds.domain_right_edge
    if cam_focus is None:
        cam_focus = dobj.ds.domain_center

    rc = RenderingContext(*window_size)

    if hasattr(dobj.ds.index, "meshes"):
        scene = MeshSceneComponent(dobj, field)
    else:
        scene = SceneGraph()
        collection = BlockCollection()
        collection.add_data(dobj, field)
        scene.add_collection(collection)

    aspect_ratio = window_size[1] / window_size[0]
    far_plane = np.linalg.norm(cam_focus - cam_position) * 2.0
    near_plane = 0.01 * far_plane

    c = TrackballCamera(position=cam_position,
                        focus=cam_focus,
                        near_plane=near_plane,
                        far_plane=far_plane,
                        aspect_ratio=aspect_ratio)
    rc.start_loop(scene, c)
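
Since the docstring notes that any 3D data object can serve as the source, a
region can be rendered interactively just as easily as a full dataset (the
sphere radius and field here are illustrative):

>>> import yt
>>> ds = yt.load("Enzo_64/DD0046/DD0046")
>>> sp = ds.sphere("max", (0.1, "Mpc"))
>>> yt.interactive_render(sp, field=("gas", "density"))
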
Example No. 32
    def save(self, fname=None, sigma_clip=None, render=True):
        r"""Saves a rendered image of the Scene to disk.

        Once you have created a scene, this saves an image array to disk with
        an optional filename. This function calls render() to generate an
        image array, unless the render parameter is set to False, in which case
        the most recently rendered scene is used if it exists.

        Parameters
        ----------
        fname: string, optional
            If specified, save the rendering to the file "fname".
            If unspecified, it creates a default based on the dataset filename.
            The file format is inferred from the filename's suffix. Supported
            formats are png, pdf, eps, and ps.
            Default: None
        sigma_clip: float, optional
            Image values greater than this number times the standard deviation
            plus the mean of the image will be clipped before saving. Useful
            for enhancing images as it gets rid of rare high pixel values.
            Default: None

            Values above (mean + sigma_clip * std_dev) are clipped to that ceiling.
        render: boolean, optional
            If True, will always render the scene before saving.
            If False, will use results of previous render if it exists.
            Default: True

        Returns
        -------
            Nothing

        Examples
        --------

        >>> import yt
        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
        >>>
        >>> sc = yt.create_scene(ds)
        >>> # Modify camera, sources, etc...
        >>> sc.save('test.png', sigma_clip=4)

        When saving multiple images without modifying the scene (camera,
        sources, etc.), render=False can be used to avoid re-rendering
        when a scene is saved. This is useful for generating images at a
        range of sigma_clip values:

        >>> import yt
        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
        >>>
        >>> sc = yt.create_scene(ds)
        >>> # save with different sigma clipping values
        >>> sc.save('raw.png')  # The initial render call happens here
        >>> sc.save('clipped_2.png', sigma_clip=2, render=False)
        >>> sc.save('clipped_4.png', sigma_clip=4, render=False)

        """
        if fname is None:
            sources = list(self.sources.values())
            rensources = [s for s in sources if isinstance(s, RenderSource)]
            # if a volume source present, use its affiliated ds for fname
            if len(rensources) > 0:
                rs = rensources[0]
                basename = rs.data_source.ds.basename
                if isinstance(rs.field, str):
                    field = rs.field
                else:
                    field = rs.field[-1]
                fname = "%s_Render_%s.png" % (basename, field)
            # if no volume source present, use a default filename
            else:
                fname = "Render_opaque.png"
        suffix = get_image_suffix(fname)
        if suffix == "":
            suffix = ".png"
            fname = "%s%s" % (fname, suffix)

        render = self._sanitize_render(render)
        if render:
            self.render()
        mylog.info("Saving rendered image to %s", fname)

        # We can render pngs natively but for other formats we defer to
        # matplotlib.
        if suffix == ".png":
            self._last_render.write_png(fname, sigma_clip=sigma_clip)
        else:
            from matplotlib.backends.backend_pdf import FigureCanvasPdf
            from matplotlib.backends.backend_ps import FigureCanvasPS
            from matplotlib.figure import Figure

            shape = self._last_render.shape
            fig = Figure((shape[0] / 100.0, shape[1] / 100.0))
            if suffix == ".pdf":
                canvas = FigureCanvasPdf(fig)
            elif suffix in (".eps", ".ps"):
                canvas = FigureCanvasPS(fig)
            else:
                raise NotImplementedError("Unknown file suffix '{}'".format(suffix))
            ax = fig.add_axes([0, 0, 1, 1])
            ax.set_axis_off()
            out = self._last_render
            nz = out[:, :, :3][out[:, :, :3].nonzero()]
            if sigma_clip is not None:
                max_val = nz.mean() + sigma_clip * nz.std()
            else:
                # Without clipping, normalize by the brightest pixel.
                max_val = nz.max()
            # Scale the alpha channel before casting; casting the 0-1 floats
            # to uint8 first would truncate them to 0 or 1.
            alpha = (255 * out[:, :, 3]).astype("uint8")
            out = np.clip(out[:, :, :3] / max_val, 0.0, 1.0) * 255
            out = np.concatenate([out.astype("uint8"), alpha[..., None]], axis=-1)
            # not sure why we need rot90, but this makes the orientation
            # match the png writer
            ax.imshow(np.rot90(out), origin="lower")
            canvas.print_figure(fname, dpi=100)
Example No. 33
    def detect_fields(cls, ds):
        # Try to get the detected fields
        detected_fields = cls.get_detected_fields(ds)
        if detected_fields:
            return detected_fields

        num = os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]
        testdomain = 1 # Just pick the first domain file to read
        basepath = os.path.abspath(
              os.path.dirname(ds.parameter_filename))
        basename = "%s/%%s_%s.out%05i" % (
            basepath, num, testdomain)
        fname = basename % 'hydro'
        fname_desc = os.path.join(basepath, cls.file_descriptor)

        attrs = cls.attrs
        with FortranFile(fname) as fd:
            hvals = fd.read_attrs(attrs)
        cls.parameters = hvals

        # Store some metadata
        ds.gamma = hvals['gamma']
        nvar = hvals['nvar']

        ok = False

        # Either the fields are given by dataset
        if ds._fields_in_file is not None:
            fields = list(ds._fields_in_file)
            ok = True
        elif os.path.exists(fname_desc):
            # Or there is a hydro file descriptor
            mylog.debug('Reading hydro file descriptor.')
            # For now, we can only read double precision fields
            fields = [e[0] for e in _read_fluid_file_descriptor(fname_desc)]

            # We get no fields for old-style hydro file descriptor
            ok = len(fields) > 0

        elif cls.config_field and ytcfg.has_section(cls.config_field):
            # Or this is given by the config
            cfg = ytcfg.get(cls.config_field, 'fields')
            known_fields = []
            for field in (_.strip() for _ in cfg.split('\n') if _.strip() != ''):
                known_fields.append(field.strip())
            fields = known_fields

            ok = True

        # Else, attempt autodetection
        if not ok:
            foldername = os.path.abspath(os.path.dirname(ds.parameter_filename))
            rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
            if rt_flag: # rt run
                if nvar < 10:
                    mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')

                    fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure",
                              "Metallicity", "HII", "HeII", "HeIII"]
                else:
                    mylog.info('Detected RAMSES-RT file WITH IR trapping.')

                    fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR",
                              "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
            else:
                if nvar < 5:
                    mylog.debug("nvar=%s is too small! YT doesn't currently "
                                "support 1D/2D runs in RAMSES", nvar)
                    raise ValueError
                # Basic hydro runs
                if nvar == 5:
                    fields = ["Density",
                              "x-velocity", "y-velocity", "z-velocity",
                              "Pressure"]
                if nvar > 5 and nvar < 11:
                    fields = ["Density",
                              "x-velocity", "y-velocity", "z-velocity",
                              "Pressure", "Metallicity"]
                # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
                if nvar == 11:
                    fields = ["Density",
                              "x-velocity", "y-velocity", "z-velocity",
                              "B_x_left","B_y_left","B_z_left",
                              "B_x_right","B_y_right","B_z_right",
                              "Pressure"]
                if nvar > 11:
                    fields = ["Density",
                              "x-velocity", "y-velocity", "z-velocity",
                              "B_x_left","B_y_left","B_z_left",
                              "B_x_right","B_y_right","B_z_right",
                              "Pressure", "Metallicity"]
            mylog.debug("No fields specified by user; automatically setting fields array to %s"
                        % str(fields))

        # Allow some wiggle room for users to add too many variables
        count_extra = 0
        while len(fields) < nvar:
            fields.append("var"+str(len(fields)))
            count_extra += 1
        if count_extra > 0:
            mylog.debug('Detected %s extra fluid fields.' % count_extra)
        cls.field_list = [(cls.ftype, e) for e in fields]

        cls.set_detected_fields(ds, fields)

        return fields
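
When autodetection guesses wrong, the config branch above offers an escape
hatch: it reads a newline-separated "fields" entry from a config section. A
sketch of that snippet, assuming cls.config_field is "ramses-hydro":

[ramses-hydro]
fields = Density
    x-velocity
    y-velocity
    z-velocity
    Pressure
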
Example No. 34
def off_axis_projection(data_source,
                        center,
                        normal_vector,
                        width,
                        resolution,
                        item,
                        weight=None,
                        volume=None,
                        no_ghost=False,
                        interpolated=False,
                        north_vector=None,
                        num_threads=1,
                        method='integrate'):
    r"""Project through a dataset, off-axis, and return the image plane.

    This function will accept the necessary items to integrate through a volume
    at an arbitrary angle and return the integrated field of view to the user.
    Note that if a weight is supplied, it will multiply the pre-interpolated
    values together, then create cell-centered values, then interpolate within
    the cell to conduct the integration.

    Parameters
    ----------
    data_source : ~yt.data_objects.static_output.Dataset or ~yt.data_objects.data_containers.YTSelectionDataContainer
        This is the dataset or data object to volume render.
    center : array_like
        The current 'center' of the view port -- the focal point for the
        camera.
    normal_vector : array_like
        The vector between the camera position and the center.
    width : float or list of floats
        The current width of the image.  If a single float, the volume is
        cubical, but if not, it is left/right, top/bottom, front/back
    resolution : int or list of ints
        The number of pixels in each direction.
    item: string
        The field to project through the volume
    weight : optional, default None
        If supplied, the field will be pre-multiplied by this, then divided by
        the integrated value of this field.  This returns an average rather
        than a sum.
    volume : `yt.extensions.volume_rendering.AMRKDTree`, optional
        The volume to ray cast through.  Can be specified for finer-grained
        control, but otherwise will be automatically generated.
    no_ghost: bool, optional
        Optimization option.  If True, homogenized bricks will
        extrapolate out from grid instead of interpolating from
        ghost zones that have to first be calculated.  This can
        lead to large speed improvements, but at a loss of
        accuracy/smoothness in resulting image.  The effects are
        less notable when the transfer function is smooth and
        broad. Default: False
    interpolated : optional, default False
        If True, the data is first interpolated to vertex-centered data,
        then tri-linearly interpolated along the ray. Not suggested for
        quantitative studies.
    north_vector : optional, array_like, default None
        A vector that, if specified, restricts the orientation such that the
        north vector dotted into the image plane points "up". Useful for rotations
    num_threads: integer, optional, default 1
        Use this many OpenMP threads during projection.
    method : string
        The method of projection.  Valid methods are:

        "integrate" with no weight_field specified : integrate the requested
        field along the line of sight.

        "integrate" with a weight_field specified : weight the requested
        field by the weighting field and integrate along the line of sight.

        "sum" : This method is the same as integrate, except that it does not
        multiply by a path length when performing the integration, and is
        just a straight summation of the field along the given axis. WARNING:
        This should only be used for uniform resolution grid datasets, as other
        datasets may result in unphysical images.
        or camera movements.
    Returns
    -------
    image : array
        An (N,N) array of the final integrated values, in float64 form.

    Examples
    --------

    >>> image = off_axis_projection(ds, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
    ...                             0.2, N, "temperature", "density")
    >>> write_image(np.log10(image), "offaxis.png")

    """

    if method not in ['integrate', 'sum']:
        raise NotImplementedError(
            "Only 'integrate' or 'sum' methods are valid for off-axis-projections"
        )

    if interpolated is True:
        raise NotImplementedError(
            "Only interpolated=False methods are currently implemented for off-axis-projections"
        )

    data_source = data_source_or_all(data_source)
    # Sanitize units
    if not hasattr(center, "units"):
        center = data_source.ds.arr(center, 'code_length')
    if not hasattr(width, "units"):
        width = data_source.ds.arr(width, 'code_length')
    sc = Scene()
    data_source.ds.index  # ensure the index is instantiated before rendering
    if item is None:
        item = data_source.ds.field_list[0]
        mylog.info('Setting default field to %s' % repr(item))

    funits = data_source.ds._get_field_info(item).units

    vol = VolumeSource(data_source, item)
    if weight is None:
        vol.set_field(item)
    else:
        # This is a temporary field, which we will remove at the end.
        weightfield = ("index", "temp_weightfield")

        def _make_wf(f, w):
            def temp_weightfield(a, b):
                tr = b[f].astype("float64") * b[w]
                return b.apply_units(tr, a.units)

            return temp_weightfield

        data_source.ds.field_info.add_field(weightfield,
                                            sampling_type="cell",
                                            function=_make_wf(item, weight))
        # Now we have to tell the dataset to add it and to calculate
        # its dependencies..
        deps, _ = data_source.ds.field_info.check_derived_fields([weightfield])
        data_source.ds.field_dependencies.update(deps)
        vol.set_field(weightfield)
        vol.set_weight_field(weight)
    ptf = ProjectionTransferFunction()
    vol.set_transfer_function(ptf)
    camera = sc.add_camera(data_source)
    camera.set_width(width)
    if not iterable(resolution):
        resolution = [resolution] * 2
    camera.resolution = resolution
    if not iterable(width):
        width = data_source.ds.arr([width] * 3)
    normal = np.array(normal_vector)
    normal = normal / np.linalg.norm(normal)

    camera.position = center - width[2] * normal
    camera.focus = center

    # If north_vector is None, we set the default here.
    # This is chosen so that if normal_vector is one of the
    # cartesian coordinate axes, the projection will match
    # the corresponding on-axis projection.
    if north_vector is None:
        vecs = np.identity(3)
        t = np.cross(vecs, normal).sum(axis=1)
        ax = t.argmax()
        east_vector = np.cross(vecs[ax, :], normal).ravel()
        north = np.cross(normal, east_vector).ravel()
    else:
        north = np.array(north_vector)
        north = north / np.linalg.norm(north)
    camera.switch_orientation(normal, north)

    sc.add_source(vol)

    vol.set_sampler(camera, interpolated=False)
    assert (vol.sampler is not None)

    fields = [vol.field]
    if vol.weight_field is not None:
        fields.append(vol.weight_field)

    mylog.debug("Casting rays")

    for i, (grid, mask) in enumerate(data_source.blocks):
        data = []
        for f in fields:
            # strip units before multiplying by mask for speed
            grid_data = grid[f]
            units = grid_data.units
            data.append(
                data_source.ds.arr(grid_data.d * mask, units, dtype='float64'))
        pg = PartitionedGrid(grid.id, data, mask.astype('uint8'),
                             grid.LeftEdge, grid.RightEdge,
                             grid.ActiveDimensions.astype("int64"))
        grid.clear_data()
        vol.sampler(pg, num_threads=num_threads)

    image = vol.finalize_image(camera, vol.sampler.aimage)
    image = ImageArray(image,
                       funits,
                       registry=data_source.ds.unit_registry,
                       info=image.info)

    if weight is not None:
        data_source.ds.field_info.pop(("index", "temp_weightfield"))

    if method == "integrate":
        if weight is None:
            dl = width[2].in_units(data_source.ds.unit_system["length"])
            image *= dl
        else:
            mask = image[:, :, 1] == 0
            image[:, :, 0] /= image[:, :, 1]
            image[mask] = 0

    return image[:, :, 0]
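
A weighted projection follows the same pattern. This sketch (field names are
illustrative) averages temperature along each sight line, weighted by density,
and saves the result with the write_projection function shown earlier:

image = off_axis_projection(ds, [0.5, 0.5, 0.5], [0.2, 0.3, 0.4],
                            0.2, 512, "temperature", weight="density")
write_projection(image, "offaxis_weighted.png",
                 colorbar_label="Temperature (K)", take_log=True)
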
Example No. 35
    def create_cosmology_splice(self,
                                near_redshift,
                                far_redshift,
                                minimal=True,
                                max_box_fraction=1.0,
                                deltaz_min=0.0,
                                time_data=True,
                                redshift_data=True):
        r"""Create list of datasets capable of spanning a redshift
        interval.

        For cosmological simulations, the physical width of the simulation
        box corresponds to some \Delta z, which varies with redshift.
        Using this logic, one can stitch together a series of datasets to
        create a continuous volume or length element from one redshift to
        another. This method will return such a list.

        Parameters
        ----------
        near_redshift : float
            The nearest (lowest) redshift in the cosmology splice list.
        far_redshift : float
            The furthest (highest) redshift in the cosmology splice list.
        minimal : bool
            If True, the minimum number of datasets is used to connect the
            initial and final redshift.  If False, the list will contain as
            many entries as possible within the redshift interval.
            Default: True.
        max_box_fraction : float
            In terms of the size of the domain, the maximum length a light
            ray segment can be in order to span the redshift interval from
            one dataset to another.  If using a zoom-in simulation, this
            parameter can be set to the length of the high resolution
            region so as to limit ray segments to that size.  If the
            high resolution region is not cubical, the smallest side
            should be used.
            Default: 1.0 (the size of the box)
        deltaz_min : float
            Specifies the minimum delta z between consecutive datasets in
            the returned list.
            Default: 0.0.
        time_data : bool
            Whether or not to include time outputs when gathering
            datasets for time series.
            Default: True.
        redshift_data : bool
            Whether or not to include redshift outputs when gathering
            datasets for time series.
            Default: True.

        Examples
        --------

        >>> co = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
        >>> cosmo = co.create_cosmology_splice(1.0, 0.0)

        """

        if time_data and redshift_data:
            self.splice_outputs = self.simulation.all_outputs
        elif time_data:
            self.splice_outputs = self.simulation.all_time_outputs
        elif redshift_data:
            self.splice_outputs = self.simulation.all_redshift_outputs
        else:
            mylog.error('Both time_data and redshift_data are False.')
            return

        # Link datasets in list with pointers.
        # This is used for connecting datasets together.
        for i, output in enumerate(self.splice_outputs):
            if i == 0:
                output['previous'] = None
            else:
                output['previous'] = self.splice_outputs[i - 1]

            if i == len(self.splice_outputs) - 1:
                output['next'] = None
            else:
                output['next'] = self.splice_outputs[i + 1]

        # Calculate maximum delta z for each data dump.
        self.max_box_fraction = max_box_fraction
        self._calculate_deltaz_max()

        # Calculate minimum delta z for each data dump.
        self._calculate_deltaz_min(deltaz_min=deltaz_min)

        cosmology_splice = []

        if near_redshift == far_redshift:
            self.simulation.get_time_series(redshifts=[near_redshift])
            cosmology_splice.append({
                'time': self.simulation[0].current_time,
                'redshift': self.simulation[0].current_redshift,
                'filename': os.path.join(self.simulation[0].fullpath,
                                         self.simulation[0].basename),
                'next': None
            })
            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
                       (cosmology_splice[0]['filename'], near_redshift))
            return cosmology_splice

        # Use minimum number of datasets to go from z_i to z_f.
        if minimal:

            z_Tolerance = 1e-3
            z = far_redshift

            # Sort data outputs by proximity to current redshift.
            self.splice_outputs.sort(
                key=lambda obj: np.fabs(z - obj['redshift']))
            cosmology_splice.append(self.splice_outputs[0])
            z = cosmology_splice[-1]["redshift"]
            z_target = z - cosmology_splice[-1]["dz_max"]

            # fill redshift space with datasets
            while ((z_target > near_redshift)
                   and (np.abs(z_target - near_redshift) > z_Tolerance)):

                # Move forward from last slice in stack until z > z_max.
                current_slice = cosmology_splice[-1]

                while current_slice["next"] is not None:
                    current_slice = current_slice['next']
                    if current_slice["next"] is None:
                        break
                    if current_slice["next"]["redshift"] < z_target:
                        break

                if current_slice["redshift"] < z_target:
                    need_fraction = self.cosmology.comoving_radial_distance(
                        current_slice["redshift"], z) / \
                        self.simulation.box_size
                    raise RuntimeError(
                        ("Cannot create cosmology splice: " +
                         "Getting from z = %f to %f requires " +
                         "max_box_fraction = %f, but max_box_fraction "
                         "is set to %f") % (z, current_slice["redshift"],
                                            need_fraction, max_box_fraction))

                cosmology_splice.append(current_slice)
                z = current_slice["redshift"]
                z_target = z - current_slice["dz_max"]

        # Make light ray using maximum number of datasets (minimum spacing).
        else:
            # Sort data outputs by proximity to current redshift.
            self.splice_outputs.sort(
                key=lambda obj: np.abs(far_redshift - obj['redshift']))
            # For first data dump, choose closest to desired redshift.
            cosmology_splice.append(self.splice_outputs[0])

            nextOutput = cosmology_splice[-1]['next']
            while (nextOutput is not None):
                if (nextOutput['redshift'] <= near_redshift):
                    break
                if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift'])
                        > cosmology_splice[-1]['dz_min']):
                    cosmology_splice.append(nextOutput)
                nextOutput = nextOutput['next']
            if (cosmology_splice[-1]['redshift'] -
                    cosmology_splice[-1]['dz_max']) > near_redshift:
                mylog.error(
                    "Cosmology splice incomplete due to insufficient data outputs."
                )
                near_redshift = cosmology_splice[-1]['redshift'] - \
                  cosmology_splice[-1]['dz_max']

        mylog.info(
            "create_cosmology_splice: Used %d data dumps to get from z = %f to %f."
            % (len(cosmology_splice), far_redshift, near_redshift))

        # change the 'next' and 'previous' pointers to point to the correct outputs
        # for the created splice
        for i, output in enumerate(cosmology_splice):
            if len(cosmology_splice) == 1:
                output['previous'] = None
                output['next'] = None
            elif i == 0:
                output['previous'] = None
                output['next'] = cosmology_splice[i + 1]
            elif i == len(cosmology_splice) - 1:
                output['previous'] = cosmology_splice[i - 1]
                output['next'] = None
            else:
                output['previous'] = cosmology_splice[i - 1]
                output['next'] = cosmology_splice[i + 1]

        self.splice_outputs.sort(key=lambda obj: obj['time'])
        return cosmology_splice
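
The dz_max computed above encodes how much redshift a single box traverses:
the interval whose comoving radial distance equals max_box_fraction times the
box size. A minimal sketch of that relation, assuming yt's Cosmology API and
illustrative cosmological parameters:

from yt.utilities.cosmology import Cosmology

co = Cosmology(hubble_constant=0.7, omega_matter=0.3, omega_lambda=0.7)
z = 1.0
box_size = 32.0  # comoving Mpc/h, illustrative
dz = 1e-3
# Scan downward in redshift until the traversed comoving distance fills the box.
while co.comoving_radial_distance(z - dz, z).in_units("Mpccm/h").value < box_size:
    dz += 1e-3
print("dz_max at z = %.1f is roughly %.4f" % (z, dz))
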
Example No. 36
def find_clumps(clump, min_val, max_val, d_clump):
    mylog.info("Finding clumps: min: %e, max: %e, step: %f" % 
               (min_val, max_val, d_clump))
    if min_val >= max_val: return
    clump.find_children(min_val)

    if (len(clump.children) == 1):
        find_clumps(clump, min_val*d_clump, max_val, d_clump)

    elif (len(clump.children) > 0):
        these_children = []
        mylog.info("Investigating %d children." % len(clump.children))
        for child in clump.children:
            find_clumps(child, min_val*d_clump, max_val, d_clump)
            if ((child.children is not None) and (len(child.children) > 0)):
                these_children.append(child)
            elif (child._validate()):
                these_children.append(child)
            else:
                mylog.info(("Eliminating invalid, childless clump with " +
                            "%d cells.") % len(child.data["ones"]))
        if (len(these_children) > 1):
            mylog.info("%d of %d children survived." %
                       (len(these_children),len(clump.children)))
            clump.children = these_children
        elif (len(these_children) == 1):
            mylog.info(("%d of %d children survived, linking its " +
                        "children to parent.") % 
                        (len(these_children),len(clump.children)))
            clump.children = these_children[0].children
        else:
            mylog.info("%d of %d children survived, erasing children." %
                       (len(these_children),len(clump.children)))
            clump.children = []
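
A typical driver for this routine, sketched after yt's clump-finding examples
(the dataset, geometry, and the d_clump step of 2.0 are illustrative):

import yt
from yt.analysis_modules.level_sets.api import Clump, find_clumps

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                      (8., 'kpc'), (1., 'kpc'))
master_clump = Clump(data_source, ("gas", "density"))
c_min = data_source["gas", "density"].min()
c_max = data_source["gas", "density"].max()
find_clumps(master_clump, c_min, c_max, 2.0)  # double the density floor each level
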
Example No. 37
def create_spectral_slabs(filename, slab_centers, slab_width,
                          **kwargs):
    r"""
    Given a dictionary of spectral slab centers and a width in
    spectral units, extract data from a spectral cube at these slab
    centers and return a `FITSDataset` instance containing the different 
    slabs as separate yt fields. Useful for extracting individual 
    lines from a spectral cube and separating them out as different fields. 

    Requires the SpectralCube (http://spectral-cube.readthedocs.org)
    library.

    All keyword arguments will be passed on to the `FITSDataset` constructor.

    Parameters
    ----------
    filename : string
        The spectral cube FITS file to extract the data from.
    slab_centers : dict of (float, string) tuples or YTQuantities
        The centers of the slabs, where the keys are the names
        of the new fields and the values are (float, string) tuples or
        YTQuantities, specifying a value for each center and its unit.
    slab_width : YTQuantity or (float, string) tuple
        The width of the slab along the spectral axis.

    Examples
    --------
    >>> slab_centers = {'13CN': (218.03117, 'GHz'),
    ...                 'CH3CH2CHO': (218.284256, 'GHz'),
    ...                 'CH3NH2': (218.40956, 'GHz')}
    >>> slab_width = (0.05, "GHz")
    >>> ds = create_spectral_slabs("intensity_cube.fits", 
    ...                            slab_centers, slab_width,
    ...                            nan_mask=0.0)
    """
    from spectral_cube import SpectralCube
    from yt.frontends.fits.api import FITSDataset
    # FITSImageData is used below; in recent yt releases it lives in
    # yt.visualization.fits_image (older releases: yt.utilities.fits_image).
    from yt.visualization.fits_image import FITSImageData
    cube = SpectralCube.read(filename)
    if not isinstance(slab_width, YTQuantity):
        slab_width = YTQuantity(slab_width[0], slab_width[1])
    slab_data = {}
    field_units = cube.header.get("bunit", "dimensionless")
    for k, v in slab_centers.items():
        if not isinstance(v, YTQuantity):
            slab_center = YTQuantity(v[0], v[1])
        else:
            slab_center = v
        mylog.info("Adding slab field %s at %g %s" %
                   (k, slab_center.v, slab_center.units))
        slab_lo = (slab_center-0.5*slab_width).to_astropy()
        slab_hi = (slab_center+0.5*slab_width).to_astropy()
        subcube = cube.spectral_slab(slab_lo, slab_hi)
        slab_data[k] = YTArray(subcube.filled_data[:,:,:], field_units)
    width = subcube.header["naxis3"]*cube.header["cdelt3"]
    w = subcube.wcs.copy()
    w.wcs.crpix[-1] = 0.5
    w.wcs.crval[-1] = -0.5*width
    fid = FITSImageData(slab_data, wcs=w)
    for hdu in fid:
        hdu.header.pop("RESTFREQ", None)
        hdu.header.pop("RESTFRQ", None)
    ds = FITSDataset(fid, **kwargs)
    return ds
Example No. 38
    def __init__(
        self,
        data,
        fields=None,
        length_unit=None,
        width=None,
        img_ctr=None,
        wcs=None,
        current_time=None,
        time_unit=None,
        mass_unit=None,
        velocity_unit=None,
        magnetic_unit=None,
        ds=None,
        unit_header=None,
        **kwargs,
    ):
        r"""Initialize a FITSImageData object.

        FITSImageData contains a collection of FITS ImageHDU instances and
        WCS information, along with units for each of the images. FITSImageData
        instances can be constructed from ImageArrays, NumPy arrays, dicts
        of such arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter
        two are the most powerful because WCS information can be constructed
        automatically from their coordinates.

        Parameters
        ----------
        data : FixedResolutionBuffer, YTCoveringGrid, ImageArray,
            numpy.ndarray, or dict of such arrays
            The data to be made into a FITS image or images.
        fields : single string or list of strings, optional
            The field names for the data. If *fields* is None and *data* has
            keys, it will use these for the fields. If *data* is just a
            single array one field name must be specified.
        length_unit : string
            The units of the WCS coordinates and the length unit of the file.
            Defaults to the length unit of the dataset, if there is one, or
            "cm" if there is not.
        width : float or YTQuantity
            The width of the image. Either a single value or iterable of values.
            If a float, assumed to be in *length_unit*. Only used if this
            information is not already provided by *data*.
        img_ctr : array_like or YTArray
            The center coordinates of the image. If a list or NumPy array,
            it is assumed to be in *length_unit*. Only used if this information
            is not already provided by *data*.
        wcs : `~astropy.wcs.WCS` instance, optional
            Supply an AstroPy WCS instance. Will override automatic WCS
            creation from FixedResolutionBuffers and YTCoveringGrids.
        current_time : float, tuple, or YTQuantity, optional
            The current time of the image(s). If not specified, one will
            be set from the dataset if there is one. If a float, it will
            be assumed to be in *time_unit* units.
        time_unit : string
            The default time units of the file. Defaults to "s".
        mass_unit : string
            The default mass units of the file. Defaults to "g".
        velocity_unit : string
            The default velocity units of the file. Defaults to "cm/s".
        magnetic_unit : string
            The default magnetic units of the file. Defaults to "gauss".
        ds : `~yt.static_output.Dataset` instance, optional
            The dataset associated with the image(s), typically used
            to transfer metadata to the header(s). Does not need to be
            specified if *data* has a dataset as an attribute.

        Examples
        --------

        >>> # This example uses a FRB.
        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
        >>> prj = ds.proj(2, "kT", weight_field=("gas", "density"))
        >>> frb = prj.to_frb((0.5, "Mpc"), 800)
        >>> # This example just uses the FRB and puts the coords in kpc.
        >>> f_kpc = FITSImageData(frb, fields="kT", length_unit="kpc",
        ...                       time_unit=(1.0, "Gyr"))
        >>> # This example specifies a specific WCS.
        >>> from astropy.wcs import WCS
        >>> w = WCS(naxis=2)
        >>> w.wcs.crval = [30., 45.] # RA, Dec in degrees
        >>> w.wcs.cunit = ["deg"]*2
        >>> nx, ny = 800, 800
        >>> w.wcs.crpix = [0.5*(nx+1), 0.5*(ny+1)]
        >>> w.wcs.ctype = ["RA---TAN","DEC--TAN"]
        >>> scale = 1./3600. # One arcsec per pixel
        >>> w.wcs.cdelt = [-scale, scale]
        >>> f_deg = FITSImageData(frb, fields="kT", wcs=w)
        >>> f_deg.writeto("temp.fits")
        """

        if fields is not None:
            fields = list(iter_fields(fields))

        if ds is None:
            ds = getattr(data, "ds", None)

        self.fields = []
        self.field_units = {}

        if unit_header is None:
            self._set_units(ds, [
                length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit
            ])
        else:
            self._set_units_from_header(unit_header)

        wcs_unit = str(self.length_unit.units)

        self._fix_current_time(ds, current_time)

        if width is None:
            width = 1.0
        if isinstance(width, tuple):
            if ds is None:
                width = YTQuantity(width[0], width[1])
            else:
                width = ds.quan(width[0], width[1])
        if img_ctr is None:
            img_ctr = np.zeros(3)

        exclude_fields = [
            "x",
            "y",
            "z",
            "px",
            "py",
            "pz",
            "pdx",
            "pdy",
            "pdz",
            "weight_field",
        ]

        if isinstance(data, _astropy.pyfits.PrimaryHDU):
            data = _astropy.pyfits.HDUList([data])

        if isinstance(data, _astropy.pyfits.HDUList):
            self.hdulist = data
            for hdu in data:
                self.fields.append(hdu.header["btype"])
                self.field_units[hdu.header["btype"]] = hdu.header["bunit"]

            self.shape = self.hdulist[0].shape
            self.dimensionality = len(self.shape)
            wcs_names = [
                key for key in self.hdulist[0].header if "WCSNAME" in key
            ]
            for name in wcs_names:
                if name == "WCSNAME":
                    key = " "
                else:
                    key = name[-1]
                w = _astropy.pywcs.WCS(header=self.hdulist[0].header,
                                       key=key,
                                       naxis=self.dimensionality)
                setattr(self, "wcs" + key.strip().lower(), w)

            return

        self.hdulist = _astropy.pyfits.HDUList()

        if hasattr(data, "keys"):
            img_data = data
            if fields is None:
                fields = list(img_data.keys())
        elif isinstance(data, np.ndarray):
            if fields is None:
                mylog.warning(
                    "No field name given for this array. Calling it 'image_data'."
                )
                fn = "image_data"
                fields = [fn]
            else:
                fn = fields[0]
            img_data = {fn: data}

        for fd in fields:
            if isinstance(fd, tuple):
                self.fields.append(fd[1])
            elif isinstance(fd, DerivedField):
                self.fields.append(fd.name[1])
            else:
                self.fields.append(fd)

        # Sanity checking names: collect duplicates by exploiting the fact
        # that set.add() returns None, so "f in s or s.add(f)" is truthy
        # only when f has already been seen.
        s = set()
        duplicates = {f for f in self.fields if f in s or s.add(f)}
        if len(duplicates) > 0:
            for i, fd in enumerate(self.fields):
                if fd in duplicates:
                    if isinstance(fields[i], tuple):
                        ftype, fname = fields[i]
                    elif isinstance(fields[i], DerivedField):
                        ftype, fname = fields[i].name
                    else:
                        raise RuntimeError(
                            f"Cannot distinguish between fields with same name {fd}!"
                        )
                    self.fields[i] = f"{ftype}_{fname}"

        for is_first, _is_last, (i, (name, field)) in mark_ends(
                enumerate(zip(self.fields, fields))):
            if name not in exclude_fields:
                this_img = img_data[field]
                if hasattr(img_data[field], "units"):
                    if this_img.units.is_code_unit:
                        mylog.warning("Cannot generate an image with code "
                                      "units. Converting to units in CGS.")
                        funits = this_img.units.get_base_equivalent("cgs")
                    else:
                        funits = this_img.units
                    self.field_units[name] = str(funits)
                else:
                    self.field_units[name] = "dimensionless"
                mylog.info("Making a FITS image of field %s", name)
                if isinstance(this_img, ImageArray):
                    if i == 0:
                        self.shape = this_img.shape[::-1]
                    this_img = np.asarray(this_img)
                else:
                    if i == 0:
                        self.shape = this_img.shape
                    this_img = np.asarray(this_img.T)
                if is_first:
                    hdu = _astropy.pyfits.PrimaryHDU(this_img)
                else:
                    hdu = _astropy.pyfits.ImageHDU(this_img)
                hdu.name = name
                hdu.header["btype"] = name
                hdu.header["bunit"] = re.sub("()", "", self.field_units[name])
                for unit in ("length", "time", "mass", "velocity", "magnetic"):
                    if unit == "magnetic":
                        short_unit = "bf"
                    else:
                        short_unit = unit[0]
                    key = f"{short_unit}unit"
                    value = getattr(self, f"{unit}_unit")
                    if value is not None:
                        hdu.header[key] = float(value.value)
                        hdu.header.comments[key] = f"[{value.units}]"
                hdu.header["time"] = float(self.current_time.value)
                if hasattr(self, "current_redshift"):
                    hdu.header["HUBBLE"] = self.hubble_constant
                    hdu.header["REDSHIFT"] = self.current_redshift
                self.hdulist.append(hdu)

        self.dimensionality = len(self.shape)

        if wcs is None:
            w = _astropy.pywcs.WCS(header=self.hdulist[0].header,
                                   naxis=self.dimensionality)
            # FRBs and covering grids are special cases where
            # we have coordinate information, so we take advantage
            # of this and construct the WCS object
            if isinstance(img_data, FixedResolutionBuffer):
                dx = (img_data.bounds[1] -
                      img_data.bounds[0]).to_value(wcs_unit)
                dy = (img_data.bounds[3] -
                      img_data.bounds[2]).to_value(wcs_unit)
                dx /= self.shape[0]
                dy /= self.shape[1]
                xctr = 0.5 * (img_data.bounds[1] +
                              img_data.bounds[0]).to_value(wcs_unit)
                yctr = 0.5 * (img_data.bounds[3] +
                              img_data.bounds[2]).to_value(wcs_unit)
                center = [xctr, yctr]
                cdelt = [dx, dy]
            elif isinstance(img_data, YTCoveringGrid):
                cdelt = img_data.dds.to_value(wcs_unit)
                center = 0.5 * (img_data.left_edge +
                                img_data.right_edge).to_value(wcs_unit)
            else:
                # If img_data is just an array we use the width and img_ctr
                # parameters to determine the cell widths
                if not is_sequence(width):
                    width = [width] * self.dimensionality
                if isinstance(width[0], YTQuantity):
                    cdelt = [
                        wh.to_value(wcs_unit) / n
                        for wh, n in zip(width, self.shape)
                    ]
                else:
                    cdelt = [float(wh) / n for wh, n in zip(width, self.shape)]
                center = img_ctr[:self.dimensionality]
            w.wcs.crpix = 0.5 * (np.array(self.shape) + 1)
            w.wcs.crval = center
            w.wcs.cdelt = cdelt
            w.wcs.ctype = ["linear"] * self.dimensionality
            w.wcs.cunit = [wcs_unit] * self.dimensionality
            self.set_wcs(w)
        else:
            self.set_wcs(wcs)
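# A minimal sketch of the same WCS bookkeeping using astropy directly; the
# shape, center, and cell widths below are illustrative stand-ins for the
# values computed from the image data above.
from astropy.wcs import WCS
import numpy as np

shape = (512, 512)     # image dimensions
center = [30.0, 45.0]  # world coordinate of the image center
cdelt = [0.01, 0.01]   # world-coordinate width of one pixel

w = WCS(naxis=2)
w.wcs.crpix = 0.5 * (np.array(shape) + 1)  # reference pixel at the center
w.wcs.crval = center
w.wcs.cdelt = cdelt
w.wcs.ctype = ["linear"] * 2
w.wcs.cunit = ["kpc"] * 2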
Example #39
    def project_light_cone(self, field_of_view, image_resolution, field,
                           weight_field=None, photon_field=False,
                           save_stack=True, save_final_image=True,
                           save_slice_images=False,
                           cmap_name="algae",
                           njobs=1, dynamic=False):
        r"""Create projections for light cone, then add them together.

        Parameters
        ----------
        field_of_view : YTQuantity or tuple of (float, str)
            The field of view of the image and the units.
        image_resolution : YTQuantity or tuple of (float, str)
            The size of each image pixel and the units.
        field : string
            The projected field.
        weight_field : string
            the weight field of the projection.  This has the same meaning as
            in standard projections.
            Default: None.
        photon_field : bool
            if True, the projection data for each slice is divided by
            4 Pi R^2, where R is the luminosity distance between the
            observer and the slice redshift.
            Default: False.
        save_stack : bool
            if True, the light cone data including each individual
            slice is written to an hdf5 file.
            Default: True.
        save_final_image : bool
            if True, save an image of the final light cone projection.
            Default: True.
        save_slice_images : bool
            save images for each individual projection slice.
            Default: False.
        cmap_name : string
            color map for images.
            Default: "algae".
        njobs : int
            The number of parallel jobs over which the light cone projection
            will be split.  Choose -1 for one processor per individual
            projection and 1 to have all processors work together on each
            projection.
            Default: 1.
        dynamic : bool
            If True, use dynamic load balancing to create the projections.
            Default: False.

        """

        if isinstance(field_of_view, tuple) and len(field_of_view) == 2:
            field_of_view = self.simulation.quan(field_of_view[0],
                                                 field_of_view[1])
        elif not isinstance(field_of_view, YTArray):
            raise RuntimeError("field_of_view argument must be either a "
                               "YTQuantity or a tuple of type (float, str).")
        if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
            image_resolution = self.simulation.quan(image_resolution[0],
                                                    image_resolution[1])
        elif not isinstance(image_resolution, YTArray):
            raise RuntimeError("image_resolution argument must be either a "
                               "YTQuantity or a tuple of type (float, str).")
        
        # Calculate number of pixels on a side.
        pixels = (field_of_view / image_resolution).in_units("")

        # Clear projection stack.
        projection_stack = []
        projection_weight_stack = []
        if "object" in self.light_cone_solution[-1]:
            del self.light_cone_solution[-1]["object"]

        # for q, output in enumerate(self.light_cone_solution):
        all_storage = {}
        for my_storage, output in parallel_objects(self.light_cone_solution,
                                                   storage=all_storage,
                                                   dynamic=dynamic):
            output["object"] = load(output["filename"])
            output["object"].parameters.update(self.set_parameters)

            # Calculate fraction of box required for width corresponding to
            # requested image size.
            proper_box_size = self.simulation.box_size / \
              (1.0 + output["redshift"])
            output["box_width_fraction"] = (output["box_width_per_angle"] *
                                            field_of_view).in_units("")
            
            frb = _light_cone_projection(output, field, pixels,
                                         weight_field=weight_field)

            if photon_field:
                # Decrement the flux by the luminosity distance.
                # Assume field in frb is in erg/s/cm^2/Hz
                dL = self.cosmology.luminosity_distance(self.observer_redshift,
                                                        output["redshift"])
                proper_box_size = self.simulation.box_size / \
                  (1.0 + output["redshift"])
                pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
                factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                mylog.info("Distance to slice = %s" % dL)
                frb[field] *= factor  # in erg/s/cm^2/Hz on the observer's image plane.

            if weight_field is None:
                my_storage.result = {"field": frb[field]}
            else:
                my_storage.result = {"field": (frb[field] *
                                               frb["weight_field"]),
                                     "weight_field": frb["weight_field"]}

            del output["object"]

        # Combine results from each slice.
        all_slices = list(all_storage.keys())
        all_slices.sort()
        for my_slice in all_slices:
            if save_slice_images:
                name = os.path.join(self.output_dir,
                                    "%s_%04d_%04d" %
                                    (self.output_prefix,
                                     my_slice, len(self.light_cone_solution)))
                if weight_field is None:
                    my_image = all_storage[my_slice]["field"]
                else:
                    my_image = all_storage[my_slice]["field"] / \
                      all_storage[my_slice]["weight_field"]
                only_on_root(write_image, np.log10(my_image),
                             "%s_%s.png" % (name, field), cmap_name=cmap_name)

            projection_stack.append(all_storage[my_slice]["field"])
            if weight_field is not None:
                projection_weight_stack.append(all_storage[my_slice]["weight_field"])

        projection_stack = self.simulation.arr(projection_stack)
        projection_weight_stack = self.simulation.arr(projection_weight_stack)
                
        # Add up slices to make light cone projection.
        if weight_field is None:
            light_cone_projection = projection_stack.sum(axis=0)
        else:
            light_cone_projection = \
              projection_stack.sum(axis=0) / \
              projection_weight_stack.sum(axis=0)

        filename = os.path.join(self.output_dir, self.output_prefix)

        # Write image.
        if save_final_image:
            only_on_root(write_image, np.log10(light_cone_projection),
                         "%s_%s.png" % (filename, field), cmap_name=cmap_name)

        # Write stack to hdf5 file.
        if save_stack:
            self._save_light_cone_stack(field, weight_field,
                projection_stack, projection_weight_stack,
                filename=filename,
                attrs={"field_of_view": str(field_of_view),
                       "image_resolution": str(image_resolution)})
Example #40
    def plan_cosmology_splice(self,
                              near_redshift,
                              far_redshift,
                              decimals=3,
                              filename=None,
                              start_index=0):
        r"""Create imaginary list of redshift outputs to maximally
        span a redshift interval.

        If you want to run a cosmological simulation that will have just
        enough data outputs to create a cosmology splice,
        this method will calculate a list of redshifts outputs that will
        minimally connect a redshift interval.

        Parameters
        ----------
        near_redshift : float
            The nearest (lowest) redshift in the cosmology splice list.
        far_redshift : float
            The furthest (highest) redshift in the cosmology splice list.
        decimals : int
            The decimal place to which the output redshift will be rounded.
            If the decimal place in question is nonzero, the redshift will
            be rounded up to
            ensure continuity of the splice.  Default: 3.
        filename : string
            If provided, a file will be written with the redshift outputs in
            the form in which they should be given in the enzo dataset.
            Default: None.
        start_index : int
            The index of the first redshift output.  Default: 0.

        Examples
        --------
        >>> from yt.extensions.astro_analysis.cosmological_observation.api import CosmologySplice
        >>> my_splice = CosmologySplice('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
        >>> my_splice.plan_cosmology_splice(0.0, 0.1, filename='redshifts.out')

        """

        z = far_redshift
        outputs = []

        while z > near_redshift:
            rounded = np.round(z, decimals=decimals)
            if rounded - z < 0:
                rounded += np.power(10.0, (-1.0 * decimals))
            z = rounded

            deltaz_max = self._deltaz_forward(
                z, self.simulation.box_size * self.max_box_fraction)
            outputs.append({'redshift': z, 'dz_max': deltaz_max})
            z -= deltaz_max

        mylog.info("%d data dumps will be needed to get from z = %f to %f." %
                   (len(outputs), near_redshift, far_redshift))

        if filename is not None:
            self.simulation._write_cosmology_outputs(filename,
                                                     outputs,
                                                     start_index,
                                                     decimals=decimals)
        return outputs
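# A small worked example of the round-up step in the loop above: rounding
# z = 0.12345 to 3 decimals gives 0.123, which falls below z, so one unit
# in the last decimal place is added back to preserve splice continuity.
import numpy as np

z = 0.12345
decimals = 3
rounded = np.round(z, decimals=decimals)  # 0.123, less than z
if rounded - z < 0:
    rounded += np.power(10.0, -1.0 * decimals)
print(rounded)  # 0.124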
Example #41
    def _setup_spec_cube(self):

        self.spec_cube = True
        self.geometry = "spectral_cube"

        end = min(self.dimensionality + 1, 4)
        if self.events_data:
            ctypes = self.axis_names
        else:
            ctypes = np.array(
                [self.primary_header["CTYPE%d" % (i)] for i in range(1, end)])

        mylog.info("Detected these axes: %s", " ".join(ctypes))

        self.lat_axis = np.zeros((end - 1), dtype="bool")
        for p in lat_prefixes:
            self.lat_axis += np_char.startswith(ctypes, p)
        self.lat_axis = np.where(self.lat_axis)[0][0]
        self.lat_name = ctypes[self.lat_axis].split("-")[0].lower()

        self.lon_axis = np.zeros((end - 1), dtype="bool")
        for p in lon_prefixes:
            self.lon_axis += np_char.startswith(ctypes, p)
        self.lon_axis = np.where(self.lon_axis)[0][0]
        self.lon_name = ctypes[self.lon_axis].split("-")[0].lower()

        if self.lat_axis == self.lon_axis and self.lat_name == self.lon_name:
            self.lat_axis = 1
            self.lon_axis = 0
            self.lat_name = "Y"
            self.lon_name = "X"

        if self.wcs.naxis > 2:

            self.spec_axis = np.zeros((end - 1), dtype="bool")
            for p in spec_names.keys():
                self.spec_axis += np_char.startswith(ctypes, p)
            self.spec_axis = np.where(self.spec_axis)[0][0]
            self.spec_name = spec_names[ctypes[self.spec_axis].split("-")[0][0]]

            self.wcs_2d = _astropy.pywcs.WCS(naxis=2)
            self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[
                self.lon_axis, self.lat_axis
            ]]
            self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[[
                self.lon_axis, self.lat_axis
            ]]
            self.wcs_2d.wcs.crval = self.wcs.wcs.crval[[
                self.lon_axis, self.lat_axis
            ]]
            self.wcs_2d.wcs.cunit = [
                str(self.wcs.wcs.cunit[self.lon_axis]),
                str(self.wcs.wcs.cunit[self.lat_axis])
            ]
            self.wcs_2d.wcs.ctype = [
                self.wcs.wcs.ctype[self.lon_axis],
                self.wcs.wcs.ctype[self.lat_axis]
            ]

            self._p0 = self.wcs.wcs.crpix[self.spec_axis]
            self._dz = self.wcs.wcs.cdelt[self.spec_axis]
            self._z0 = self.wcs.wcs.crval[self.spec_axis]
            self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])

            if self.spectral_factor == "auto":
                self.spectral_factor = float(
                    max(self.domain_dimensions[[self.lon_axis,
                                                self.lat_axis]]))
                self.spectral_factor /= self.domain_dimensions[self.spec_axis]
                mylog.info("Setting the spectral factor to %f" %
                           (self.spectral_factor))
            Dz = self.domain_right_edge[
                self.spec_axis] - self.domain_left_edge[self.spec_axis]
            dre = self.domain_right_edge
            dre[self.spec_axis] = (self.domain_left_edge[self.spec_axis] +
                                   self.spectral_factor * Dz)
            self.domain_right_edge = dre
            self._dz /= self.spectral_factor
            self._p0 = (self._p0 - 0.5) * self.spectral_factor + 0.5

        else:

            self.wcs_2d = self.wcs
            self.spec_axis = 2
            self.spec_name = "z"
            self.spec_unit = "code_length"
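# Hedged sketch: astropy can extract the 2D celestial part of a 3D WCS
# directly, which is what the manual crpix/cdelt/crval copying above
# reproduces. The ctypes here are illustrative.
from astropy.wcs import WCS

w3d = WCS(naxis=3)
w3d.wcs.ctype = ["RA---SIN", "DEC--SIN", "VELO-LSR"]
wcs_2d = w3d.celestial    # sub-WCS containing only the sky axes
spec_axis = w3d.wcs.spec  # index of the spectral axis (here 2)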
Example #42
    def _detect_output_fields(self):
        self.field_list = []
        self._axis_map = {}
        self._file_map = {}
        self._ext_map = {}
        self._scale_map = {}
        dup_field_index = {}
        # Since FITS header keywords are case-insensitive, we only pick a subset of
        # prefixes, ones that we expect to end up in headers.
        known_units = dict([(unit.lower(), unit)
                            for unit in self.ds.unit_registry.lut])
        for unit in list(known_units.values()):
            if unit in self.ds.unit_registry.prefixable_units:
                for p in ["n", "u", "m", "c", "k"]:
                    known_units[(p + unit).lower()] = p + unit
        # We create a field from each slice on the 4th axis
        if self.dataset.naxis == 4:
            naxis4 = self.dataset.primary_header["naxis4"]
        else:
            naxis4 = 1
        for i, fits_file in enumerate(self.dataset._handle._fits_files):
            for j, hdu in enumerate(fits_file):
                if (isinstance(hdu, _astropy.pyfits.BinTableHDU)
                        or hdu.header["naxis"] == 0):
                    continue
                if self._ensure_same_dims(hdu):
                    units = self._determine_image_units(hdu.header["bunit"])
                    try:
                        # Grab field name from btype
                        fname = hdu.header["btype"]
                    except KeyError:
                        # Try to guess the name from the units
                        fname = self._guess_name_from_units(units)
                        # When all else fails
                        if fname is None:
                            fname = "image_%d" % (j)
                    if self.ds.num_files > 1 and fname.startswith("image"):
                        fname += "_file_%d" % (i)
                    if ("fits", fname) in self.field_list:
                        if fname in dup_field_index:
                            dup_field_index[fname] += 1
                        else:
                            dup_field_index[fname] = 1
                        mylog.warning(
                            "This field has the same name as a previously loaded "
                            "field. Changing the name from %s to %s_%d. To avoid "
                            "this, change one of the BTYPE header keywords.",
                            fname,
                            fname,
                            dup_field_index[fname],
                        )
                        fname += "_%d" % (dup_field_index[fname])
                    for k in range(naxis4):
                        if naxis4 > 1:
                            fname += "_%s_%d" % (hdu.header["CTYPE4"], k + 1)
                        self._axis_map[fname] = k
                        self._file_map[fname] = fits_file
                        self._ext_map[fname] = j
                        self._scale_map[fname] = [0.0, 1.0]
                        if "bzero" in hdu.header:
                            self._scale_map[fname][0] = hdu.header["bzero"]
                        if "bscale" in hdu.header:
                            self._scale_map[fname][1] = hdu.header["bscale"]
                        self.field_list.append(("fits", fname))
                        self.dataset.field_units[fname] = units
                        mylog.info("Adding field %s to the list of fields.",
                                   fname)
                        if units == "dimensionless":
                            mylog.warning(
                                "Could not determine dimensions for field %s, "
                                "setting to dimensionless.",
                                fname,
                            )
                else:
                    mylog.warning(
                        "Image block %s does not have the same dimensions "
                        "as the primary and will not be available as a field.",
                        hdu.name.lower(),
                    )
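# The _scale_map entries follow the standard FITS linear-scaling convention:
# physical = BZERO + BSCALE * stored. A minimal sketch with made-up values:
import numpy as np

raw = np.array([100, 200, 300], dtype=np.int16)  # stored integer data
bzero, bscale = 32768.0, 0.5
physical = bzero + bscale * raw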
Example #43
def find_clumps(clump, min_val, max_val, d_clump):
    mylog.info("Finding clumps: min: %e, max: %e, step: %f" % 
               (min_val, max_val, d_clump))
    if min_val >= max_val: return
    clump.find_children(min_val)

    if (len(clump.children) == 1):
        find_clumps(clump, min_val*d_clump, max_val, d_clump)

    elif (len(clump.children) > 0):
        these_children = []
        mylog.info("Investigating %d children." % len(clump.children))
        for child in clump.children:
            find_clumps(child, min_val*d_clump, max_val, d_clump)
            if ((child.children is not None) and (len(child.children) > 0)):
                these_children.append(child)
            elif (child._validate()):
                these_children.append(child)
            else:
                mylog.info(("Eliminating invalid, childless clump with " +
                            "%d cells.") % len(child.data["ones"]))
        if (len(these_children) > 1):
            mylog.info("%d of %d children survived." %
                       (len(these_children),len(clump.children)))
            clump.children = these_children
        elif (len(these_children) == 1):
            mylog.info(("%d of %d children survived, linking its " +
                        "children to parent.") % 
                        (len(these_children),len(clump.children)))
            clump.children = these_children[0].children
        else:
            mylog.info("%d of %d children survived, erasing children." %
                       (len(these_children),len(clump.children)))
            clump.children = []
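# Hedged usage sketch for find_clumps, following the yt clump-finder
# pattern; the dataset name and contour bounds are placeholders.
import yt
from yt.data_objects.level_sets.api import Clump

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
data_source = ds.disk([0.5, 0.5, 0.5], [0.0, 0.0, 1.0],
                      (8, "kpc"), (1, "kpc"))
master_clump = Clump(data_source, ("gas", "density"))
c_min = data_source["gas", "density"].min()
c_max = data_source["gas", "density"].max()
find_clumps(master_clump, c_min, c_max, 2.0)  # d_clump: contour step factor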
Example #44
def add_xray_emissivity_field(
    ds,
    e_min,
    e_max,
    redshift=0.0,
    metallicity=("gas", "metallicity"),
    table_type="cloudy",
    data_dir=None,
    cosmology=None,
    dist=None,
    ftype="gas",
):
    r"""Create X-ray emissivity fields for a given energy range.

    Parameters
    ----------
    e_min : float
        The minimum energy in keV for the energy band.
    e_max : float
        The maximum energy in keV for the energy band.
    redshift : float, optional
        The cosmological redshift of the source of the field. Default: 0.0.
    metallicity : str or tuple of str or float, optional
        Either the name of a metallicity field or a single floating-point
        number specifying a spatially constant metallicity. Must be in
        solar units. If set to None, no metals will be assumed. Default:
        ("gas", "metallicity")
    table_type : string, optional
        The type of emissivity table to be used when creating the fields.
        Options are "cloudy" or "apec". Default: "cloudy"
    data_dir : string, optional
        The location to look for the data table in. If not supplied, the file
        will be looked for in the location of the YT_DEST environment variable
        or in the current working directory.
    cosmology : :class:`~yt.utilities.cosmology.Cosmology`, optional
        If set and redshift > 0.0, this cosmology will be used when computing the
        cosmological dependence of the emission fields. If not set, yt's default
        LCDM cosmology will be used.
    dist : (value, unit) tuple or :class:`~yt.units.yt_array.YTQuantity`, optional
        The distance to the source, used for making intensity fields. You should
        only use this if your source is nearby (not cosmological). Default: None
    ftype : string, optional
        The field type to use when creating the fields, default "gas"

    This will create at least three fields:

    "xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
    "xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
    "xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)

    and if a redshift or distance is specified it will create two others:

    "xray_intensity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3 arcsec^-2)
    "xray_photon_intensity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3 arcsec^-2)

    These latter two are really only useful when making projections.

    Examples
    --------

    >>> import yt
    >>> ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100")
    >>> yt.add_xray_emissivity_field(ds, 0.5, 2, table_type='apec')
    >>> p = yt.ProjectionPlot(ds, 'x', ("gas","xray_emissivity_0.5_2_keV"))
    >>> p.save()
    """
    if not isinstance(metallicity, float) and metallicity is not None:
        try:
            metallicity = ds._get_field_info(*metallicity)
        except YTFieldNotFound:
            raise RuntimeError(
                "Your dataset does not have a {} field! ".format(metallicity) +
                "Perhaps you should specify a constant metallicity instead?")

    if table_type == "cloudy":
        # Cloudy wants to scale by nH**2
        other_n = "H_nuclei_density"
    else:
        # APEC wants to scale by nH*ne
        other_n = "El_number_density"

    def _norm_field(field, data):
        return data[ftype, "H_nuclei_density"] * data[ftype, other_n]

    ds.add_field((ftype, "norm_field"),
                 _norm_field,
                 units="cm**-6",
                 sampling_type="local")

    my_si = XrayEmissivityIntegrator(table_type,
                                     data_dir=data_dir,
                                     redshift=redshift)

    em_0 = my_si.get_interpolator("primordial", e_min, e_max)
    emp_0 = my_si.get_interpolator("primordial", e_min, e_max, energy=False)
    if metallicity is not None:
        em_Z = my_si.get_interpolator("metals", e_min, e_max)
        emp_Z = my_si.get_interpolator("metals", e_min, e_max, energy=False)

    def _emissivity_field(field, data):
        with np.errstate(all="ignore"):
            dd = {
                "log_nH": np.log10(data[ftype, "H_nuclei_density"]),
                "log_T": np.log10(data[ftype, "temperature"]),
            }

        my_emissivity = np.power(10, em_0(dd))
        if metallicity is not None:
            if isinstance(metallicity, DerivedField):
                my_Z = data[metallicity.name].to("Zsun")
            else:
                my_Z = metallicity
            my_emissivity += my_Z * np.power(10, em_Z(dd))

        my_emissivity[np.isnan(my_emissivity)] = 0

        return data[ftype, "norm_field"] * YTArray(my_emissivity,
                                                   "erg*cm**3/s")

    emiss_name = (ftype, "xray_emissivity_%s_%s_keV" % (e_min, e_max))
    ds.add_field(
        emiss_name,
        function=_emissivity_field,
        display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
        sampling_type="local",
        units="erg/cm**3/s",
    )

    def _luminosity_field(field, data):
        return data[emiss_name] * data[ftype, "mass"] / data[ftype, "density"]

    lum_name = (ftype, "xray_luminosity_%s_%s_keV" % (e_min, e_max))
    ds.add_field(
        lum_name,
        function=_luminosity_field,
        display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
        sampling_type="local",
        units="erg/s",
    )

    def _photon_emissivity_field(field, data):
        dd = {
            "log_nH": np.log10(data[ftype, "H_nuclei_density"]),
            "log_T": np.log10(data[ftype, "temperature"]),
        }

        my_emissivity = np.power(10, emp_0(dd))
        if metallicity is not None:
            if isinstance(metallicity, DerivedField):
                my_Z = data[metallicity.name].to("Zsun")
            else:
                my_Z = metallicity
            my_emissivity += my_Z * np.power(10, emp_Z(dd))

        return data[ftype, "norm_field"] * YTArray(my_emissivity,
                                                   "photons*cm**3/s")

    phot_name = (ftype, "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max))
    ds.add_field(
        phot_name,
        function=_photon_emissivity_field,
        display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
        sampling_type="local",
        units="photons/cm**3/s",
    )

    fields = [emiss_name, lum_name, phot_name]

    if redshift > 0.0 or dist is not None:

        if dist is None:
            if cosmology is None:
                if hasattr(ds, "cosmology"):
                    cosmology = ds.cosmology
                else:
                    cosmology = Cosmology()
            D_L = cosmology.luminosity_distance(0.0, redshift)
            angular_scale = 1.0 / cosmology.angular_scale(0.0, redshift)
            dist_fac = ds.quan(
                1.0 /
                (4.0 * np.pi * D_L * D_L * angular_scale * angular_scale).v,
                "rad**-2",
            )
        else:
            redshift = 0.0  # Only for local sources!
            if not isinstance(dist, YTQuantity):
                try:
                    dist = ds.quan(dist[0], dist[1])
                except TypeError:
                    raise RuntimeError(
                        "Please specifiy 'dist' as a YTQuantity "
                        "or a (value, unit) tuple!")
            else:
                dist = ds.quan(dist.value, dist.units)
            angular_scale = dist / ds.quan(1.0, "radian")
            dist_fac = ds.quan(
                1.0 /
                (4.0 * np.pi * dist * dist * angular_scale * angular_scale).v,
                "rad**-2",
            )

        ei_name = (ftype, "xray_intensity_%s_%s_keV" % (e_min, e_max))

        def _intensity_field(field, data):
            I = dist_fac * data[emiss_name]
            return I.in_units("erg/cm**3/s/arcsec**2")

        ds.add_field(
            ei_name,
            function=_intensity_field,
            display_name=r"I_{X} (%s-%s keV)" % (e_min, e_max),
            sampling_type="local",
            units="erg/cm**3/s/arcsec**2",
        )

        i_name = (ftype, "xray_photon_intensity_%s_%s_keV" % (e_min, e_max))

        def _photon_intensity_field(field, data):
            I = (1.0 + redshift) * dist_fac * data[phot_name]
            return I.in_units("photons/cm**3/s/arcsec**2")

        ds.add_field(
            i_name,
            function=_photon_intensity_field,
            display_name=r"I_{X} (%s-%s keV)" % (e_min, e_max),
            sampling_type="local",
            units="photons/cm**3/s/arcsec**2",
        )

        fields += [ei_name, i_name]

    for field in fields:
        mylog.info("Adding ('%s','%s') field.", field[0], field[1])

    return fields
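# Hedged sketch: for a nearby (non-cosmological) source, pass `dist` instead
# of a redshift to get the intensity fields as well. The dataset name and
# distance are placeholders.
import yt

ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100")
fields = add_xray_emissivity_field(ds, 0.5, 7.0, table_type="apec",
                                   metallicity=0.3, dist=(17.0, "Mpc"))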
Example #45
def create_scene(data_source, field=None, lens_type="plane-parallel"):
    r""" Set up a scene object with sensible defaults for use in volume
    rendering.

    A helper function that creates a default camera view, transfer
    function, and image size. Using these, it returns an instance
    of the Scene class, allowing one to further modify their rendering.

    This function is the same as volume_render() except it doesn't render
    the image.

    Parameters
    ----------
    data_source : :class:`yt.data_objects.data_containers.AMR3DData`
        This is the source to be rendered, which can be any arbitrary yt
        3D object
    field: string, tuple, optional
        The field to be rendered. If unspecified, this will use the
        default_field for your dataset's frontend--usually ('gas', 'density').
        A default transfer function will be built that spans the range of
        values for that given field, and the field will be logarithmically
        scaled if the field_info object specifies as such.
    lens_type: string, optional
        This specifies the type of lens to use for rendering. Current
        options are 'plane-parallel', 'perspective', and 'fisheye'. See
        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
        Default: 'plane-parallel'

    Returns
    -------
    sc: Scene
        A :class:`yt.visualization.volume_rendering.scene.Scene` object
        that was constructed during the rendering. Useful for further
        modifications, rotations, etc.

    Examples
    --------

    >>> import yt
    >>> ds = yt.load("Enzo_64/DD0046/DD0046")
    >>> sc = yt.create_scene(ds)
    """

    data_source = data_source_or_all(data_source)
    sc = Scene()
    if field is None:
        field = data_source.ds.default_field
        if field not in data_source.ds.derived_field_list:
            raise YTSceneFieldNotFound("""Could not find field '%s' in %s.
                  Please specify a field in create_scene()""" %
                                       (field, data_source.ds))
        mylog.info("Setting default field to %s" % field.__repr__())

    if hasattr(data_source.ds.index, "meshes"):
        source = MeshSource(data_source, field=field)
    else:
        source = VolumeSource(data_source, field=field)

    sc.add_source(source)
    sc.add_camera(data_source=data_source, lens_type=lens_type)
    return sc
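# Follow-up sketch: the returned Scene can be adjusted before rendering;
# the dataset name is a placeholder.
import yt

ds = yt.load("Enzo_64/DD0046/DD0046")
sc = yt.create_scene(ds, field=("gas", "density"))
sc.camera.zoom(1.5)  # move the camera in before rendering
sc.save("rendering.png", sigma_clip=4.0)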
Example #46
    def save_annotated(
        self,
        fname=None,
        label_fmt=None,
        text_annotate=None,
        dpi=100,
        sigma_clip=None,
        render=True,
    ):
        r"""Saves the most recently rendered image of the Scene to disk,
        including an image of the transfer function and user-defined
        text.

        Once you have created a scene and rendered that scene to an image
        array, this saves that image array to disk with an optional filename.
        If an image has not yet been rendered for the current scene object,
        it forces one and writes it out.

        Parameters
        ----------
        fname: string, optional
            If specified, save the rendering as a bitmap to the file "fname".
            If unspecified, it creates a default based on the dataset filename.
            Default: None
        sigma_clip: float, optional
            Image values greater than this number times the standard deviation
            plus the mean of the image will be clipped before saving. Useful
            for enhancing images as it gets rid of rare high pixel values.
            Default: None

            floor(vals > std_dev*sigma_clip + mean)
        dpi: integer, optional
            By default, the resulting image will be the same size as the camera
            parameters.  If you supply a dpi, then the image will be scaled
            accordingly (from the default 100 dpi)
        label_fmt : str, optional
           A format specifier (e.g., label_fmt="%.2g") to use in formatting
           the data values that label the transfer function colorbar.
        text_annotate : list of iterables
           Any text that you wish to display on the image.  This should be an
           list containing a tuple of coordinates (in normalized figure
           coordinates), the text to display, and, optionally, a dictionary of
           keyword/value pairs to pass through to the matplotlib text()
           function.

           Each item in the main list is a separate string to write.
        render: boolean, optional
            If True, will render the scene before saving.
            If False, will use results of previous render if it exists.
            Default: True

        Returns
        -------
            Nothing


        Examples
        --------

        >>> sc.save_annotated("fig.png",
        ...                   text_annotate=[[(0.05, 0.05),
        ...                                   "t = {}".format(ds.current_time.d),
        ...                                   dict(horizontalalignment="left")],
        ...                                  [(0.5,0.95),
        ...                                   "simulation title",
        ...                                   dict(color="y", fontsize="24",
        ...                                        horizontalalignment="center")]])

        """
        from yt.visualization._mpl_imports import (
            FigureCanvasAgg,
            FigureCanvasPdf,
            FigureCanvasPS,
        )

        sources = list(self.sources.values())
        rensources = [s for s in sources if isinstance(s, RenderSource)]

        if fname is None:
            # if a volume source present, use its affiliated ds for fname
            if len(rensources) > 0:
                rs = rensources[0]
                basename = rs.data_source.ds.basename
                if isinstance(rs.field, str):
                    field = rs.field
                else:
                    field = rs.field[-1]
                fname = "%s_Render_%s.png" % (basename, field)
            # if no volume source present, use a default filename
            else:
                fname = "Render_opaque.png"
        suffix = get_image_suffix(fname)
        if suffix == "":
            suffix = ".png"
            fname = "%s%s" % (fname, suffix)

        render = self._sanitize_render(render)
        if render:
            self.render()
        mylog.info("Saving rendered image to %s", fname)

        # which transfer function?
        rs = rensources[0]
        tf = rs.transfer_function
        label = rs.data_source.ds._get_field_info(rs.field).get_label()

        ax = self._show_mpl(
            self._last_render.swapaxes(0, 1), sigma_clip=sigma_clip, dpi=dpi
        )
        self._annotate(ax.axes, tf, rs, label=label, label_fmt=label_fmt)

        # any text?
        if text_annotate is not None:
            f = self._render_figure
            for t in text_annotate:
                xy = t[0]
                string = t[1]
                if len(t) == 3:
                    opt = t[2]
                else:
                    opt = dict()

                # sane default
                if "color" not in opt:
                    opt["color"] = "w"

                ax.axes.text(xy[0], xy[1], string, transform=f.transFigure, **opt)

        suffix = get_image_suffix(fname)

        if suffix == ".png":
            canvas = FigureCanvasAgg(self._render_figure)
        elif suffix == ".pdf":
            canvas = FigureCanvasPdf(self._render_figure)
        elif suffix in (".eps", ".ps"):
            canvas = FigureCanvasPS(self._render_figure)
        else:
            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
            canvas = self.canvas

        self._render_figure.canvas = canvas
        self._render_figure.tight_layout()
        self._render_figure.savefig(fname, facecolor="black", pad_inches=0)
Example #47
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.
        """
        self.domain_left_edge = np.zeros(3, dtype='float')
        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
        self.dimensionality = 3
        self.refine_by = 2
        self.periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.unique_identifier = \
            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
        self.parameters.update(constants)
        self.parameters['Time'] = 1.0
        # read the amr header
        with open(self._file_amr, 'rb') as f:
            amr_header_vals = fpu.read_attrs(f, amr_header_struct, '>')
            for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
                fpu.skip(f, endian='>')
            (self.ncell) = fpu.read_vector(f, 'i', '>')[0]
            # Try to figure out the root grid dimensions
            est = int(np.rint(self.ncell**(1.0/3.0)))
            # Note here: this is the number of *cells* on the root grid.
            # This is not the same as the number of Octs.
            # domain dimensions is the number of root *cells*
            self.domain_dimensions = np.ones(3, dtype='int64')*est
            self.root_grid_mask_offset = f.tell()
            self.root_nocts = self.domain_dimensions.prod() // 8
            self.root_ncells = self.root_nocts*8
            mylog.debug("Estimating %i cells on a root grid side, "
                        "%i root octs", est, self.root_nocts)
            self.root_iOctCh = fpu.read_vector(f, 'i', '>')[:self.root_ncells]
            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
                                                        order='F')
            self.root_grid_offset = f.tell()
            self.root_nhvar = fpu.skip(f, endian='>')
            self.root_nvar = fpu.skip(f, endian='>')
            # make sure that the number of root variables is a multiple of
            # rootcells
            assert self.root_nhvar % self.root_ncells == 0
            assert self.root_nvar % self.root_ncells == 0
            self.nhydro_variables = ((self.root_nhvar + self.root_nvar) //
                                     self.root_ncells)
            self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
            self.child_grid_offset = f.tell()
            # lextra needs to be loaded as a string, but it's actually
            # array values.  So pop it off here, and then re-insert.
            lextra = amr_header_vals.pop("lextra")
            amr_header_vals['lextra'] = np.frombuffer(lextra, '>f4')
            self.parameters.update(amr_header_vals)
            amr_header_vals = None
            # estimate the root level
            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                f,
                [0, self.child_grid_offset], 1,
                coarse_grid=self.domain_dimensions[0])
            del float_center, fl, iocts, nocts
            self.root_level = root_level
            mylog.info("Using root level of %02i", self.root_level)
        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        if not self.skip_particles and self._file_particle_header:
            with open(self._file_particle_header, "rb") as fh:
                particle_header_vals = fpu.read_attrs(
                    fh, particle_header_struct, '>')
                fh.seek(seek_extras)
                n = particle_header_vals['Nspecies']
                wspecies = np.fromfile(fh, dtype='>f', count=10)
                lspecies = np.fromfile(fh, dtype='>i', count=10)
                # extras needs to be loaded as a string, but it's actually
                # array values.  So pop it off here, and then re-insert.
                extras = particle_header_vals.pop("extras")
                particle_header_vals['extras'] = np.frombuffer(extras, '>f4')
            self.parameters['wspecies'] = wspecies[:n]
            self.parameters['lspecies'] = lspecies[:n]
            for specie in range(n):
                self.particle_types.append("specie%i" % specie)
            self.particle_types_raw = tuple(
                self.particle_types)
            ls_nonzero = np.diff(lspecies)[:n-1]
            ls_nonzero = np.append(lspecies[0], ls_nonzero)
            self.star_type = len(ls_nonzero)
            mylog.info("Discovered %i species of particles", len(ls_nonzero))
            mylog.info("Particle populations: "+'%9i '*len(ls_nonzero),
                       *ls_nonzero)
            self._particle_type_counts = dict(
                zip(self.particle_types_raw, ls_nonzero))
            for k, v in particle_header_vals.items():
                if k in self.parameters.keys():
                    if not self.parameters[k] == v:
                        mylog.info(
                            "Inconsistent parameter %s %1.1e  %1.1e", k, v,
                            self.parameters[k])
                else:
                    self.parameters[k] = v
            self.parameters_particles = particle_header_vals
            self.parameters.update(particle_header_vals)
            self.parameters['ng'] = self.parameters['Ngridc']
            self.parameters['ncell0'] = self.parameters['ng']**3


        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
        self.omega_lambda = self.parameters['Oml0']
        self.omega_matter = self.parameters['Om0']
        self.hubble_constant = self.parameters['hubble']
        self.min_level = self.parameters['min_level']
        self.max_level = self.parameters['max_level']
        if self.limit_level is not None:
            self.max_level = min(
                self.limit_level, self.parameters['max_level'])
        if self.force_max_level is not None:
            self.max_level = self.force_max_level
        self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
        self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
Example #48
def plot_spectrum(wavelength,
                  flux,
                  filename="spectrum.png",
                  lambda_limits=None,
                  flux_limits=None,
                  title=None,
                  label=None,
                  figsize=None,
                  step=False,
                  stagger=0.2,
                  features=None,
                  axis_labels=None):
    """
    Plot a spectrum or a collection of spectra and save to disk.

    This function wraps some Matplotlib plotting functionality for
    plotting spectra generated with the :class:`~trident.SpectrumGenerator`.
    In its simplest form, it accepts a wavelength array consisting of 
    wavelength values and a corresponding flux array consisting of relative
    flux values, and it plots them and saves to disk.

    In addition, it can plot several spectra on the same axes simultaneously
    by passing a list of arrays to the ``wavelength``, ``flux`` arguments 
    (and optionally to the ``label`` and ``step`` keywords).

    Returns the Matplotlib Figure object for further processing.

    **Parameters**

    :wavelength: array of floats or list of arrays of floats

        Wavelength values in angstroms.  Either as an array of floats in the
        case of plotting a single spectrum, or as a list of arrays of floats
        in the case of plotting several spectra on the same axes.

    :flux: array of floats or list of arrays of floats

        Relative flux values (from 0 to 1) corresponding to wavelength array.
        Either as an array of floats in the case of plotting a single 
        spectrum, or as a list of arrays of floats in the case of plotting 
        several spectra on the same axes.

    :filename: string, optional

        Output filename of the plotted spectrum.  Will be a png file.
        Default: 'spectrum.png'

    :lambda_limits: tuple or list of floats, optional

        The minimum and maximum of the wavelength range (x-axis) for the plot
        in angstroms.  If specified as None, will use whole lambda range
        of spectrum. Example: (1200, 1400) for 1200-1400 Angstroms
        Default: None

    :flux_limits: tuple or list of floats, optional

        The minimum and maximum of the flux range (y-axis) for the plot.
        If specified as None, limits are automatically from
        [0, 1.1*max(flux)]. Example: (0, 1) for normal flux range before
        postprocessing.
        Default: None

    :step: boolean or list of booleans, optional

        Plot the spectrum as a series of step functions.  Appropriate for 
        plotting processed and noisy data.  Use a list of booleans when
        plotting multiple spectra, where each boolean corresponds to the entry
        in the ``wavelength`` and ``flux`` lists.

    :title: string, optional

        Optional title for plot
        Default: None

    :label: string or list of strings, optional

        Label for each spectrum to be plotted. Useful if plotting multiple
        spectra simultaneously.  Will automatically trigger a legend to be
        generated.
        Default: None

    :stagger: float, optional

        If plotting multiple spectra on the same axes, do we offset them in
        the y direction?  If set to None, no.  If set to a float, stagger them 
        by the flux value specified by this parameter.

    :features: dict, optional

        Include vertical lines with labels to represent certain spectral
        features.  Each entry in the dictionary consists of a key string to
        be overplot and the value float as to where in wavelength space it 
        will be plot as a vertical line with the corresponding label.

        Example: features={'Ly a' : 1216, 'Ly b' : 1026}

        Default: None

    :axis_labels: tuple of strings, optional

        Optionally set the axis labels directly.  If set to None, defaults to
        ('Wavelength [$\\rm\\AA$]', 'Relative Flux').
        Default: None

    **Returns**

    Matplotlib Figure object for further processing

    **Example**

    Plot a flat spectrum

    >>> import numpy as np
    >>> import trident
    >>> wavelength = np.arange(1200, 1400)
    >>> flux = np.ones(len(wavelength))
    >>> trident.plot_spectrum(wavelength, flux)

    Generate a one-zone ray, create a Lyman alpha spectrum from it, and add
    gaussian noise to it.  Plot both the raw spectrum and the noisy spectrum
    on top of each other.

    >>> import trident
    >>> ray = trident.make_onezone_ray(column_densities={'H_p0_number_density':1e21})
    >>> sg_final = trident.SpectrumGenerator(lambda_min=1200, lambda_max=1300, dlambda=0.5)
    >>> sg_final.make_spectrum(ray, lines=['Ly a'])
    >>> sg_final.save_spectrum('spec_raw.h5')
    >>> sg_final.add_gaussian_noise(10)
    >>> sg_raw = trident.load_spectrum('spec_raw.h5')
    >>> trident.plot_spectrum([sg_raw.lambda_field, sg_final.lambda_field], 
    ... [sg_raw.flux_field, sg_final.flux_field], stagger=0, step=[False, True],
    ... label=['Raw', 'Noisy'], filename='raw_and_noise.png')
    """

    # number of rows and columns
    n_rows = 1
    n_columns = 1

    # blank space between edge of figure and active plot area
    top_buffer = 0.07
    bottom_buffer = 0.15
    left_buffer = 0.06
    right_buffer = 0.03

    # blank space between plots
    hor_buffer = 0.05
    vert_buffer = 0.05

    # calculate the height and width of each panel
    panel_width = ((1.0 - left_buffer - right_buffer -
                    ((n_columns - 1) * hor_buffer)) / n_columns)
    panel_height = ((1.0 - top_buffer - bottom_buffer -
                     ((n_rows - 1) * vert_buffer)) / n_rows)

    # create a figure (figsize is in inches)
    if figsize is None:
        figsize = (12, 4)
    figure = matplotlib.figure.Figure(figsize=figsize, frameon=True)

    # get the row and column number
    my_row = 0
    my_column = 0

    # calculate the position of the bottom, left corner of this plot
    left_side = left_buffer + (my_column * panel_width) + \
                my_column * hor_buffer
    top_side = 1.0 - (top_buffer + (my_row * panel_height) + \
               my_row * vert_buffer)
    bottom_side = top_side - panel_height

    # create an axes object on which we will make the plot
    my_axes = figure.add_axes(
        (left_side, bottom_side, panel_width, panel_height))

    # Are we overplotting several spectra?  or just one?
    if not (isinstance(wavelength, list) and isinstance(flux, list)):
        wavelengths = [wavelength]
        fluxs = [flux]
        labels = [label]
        steps = [step]
    else:
        wavelengths = wavelength
        fluxs = flux
        if label is not None:
            labels = label
        else:
            labels = [None] * len(fluxs)
        if step is not None:
            steps = step
        else:
            steps = [None] * len(fluxs)

    # A running maximum of flux for use in ylim scaling in final plot
    max_flux = 0.

    for i, (wavelength, flux) in enumerate(zip(wavelengths, fluxs)):

        # Do we stagger the fluxes?
        if stagger is not None:
            flux -= stagger * i

        # Do we include labels and a legend?
        if steps[i]:
            my_axes.step(wavelength, flux, label=labels[i])
        else:
            my_axes.plot(wavelength, flux, label=labels[i])

        new_max_flux = np.max(flux)
        if new_max_flux > max_flux:
            max_flux = new_max_flux

        # Return the fluxes to their normal values
        # if they've been staggered
        if stagger is not None:
            flux += stagger * i

    # Do we include a title?
    if title is not None:
        my_axes.set_title(title)

    if lambda_limits is None:
        lambda_limits = (wavelength.min(), wavelength.max())
    my_axes.set_xlim(lambda_limits[0], lambda_limits[1])

    if flux_limits is None:
        flux_limits = (0, 1.1 * max_flux)
    my_axes.set_ylim(flux_limits[0], flux_limits[1])
    if axis_labels is None:
        axis_labels = ('Wavelength [$\\rm\\AA$]', 'Relative Flux')
    my_axes.xaxis.set_label_text(axis_labels[0])
    my_axes.yaxis.set_label_text(axis_labels[1])

    # Don't let the x-axis switch to offset values for tick labels
    my_axes.get_xaxis().get_major_formatter().set_useOffset(False)

    if label is not None: my_axes.legend()

    # Overplot the relevant features on the plot
    if features is not None:
        for feature in features:
            label = feature
            wavelength = features[feature]
            # Draw line
            my_axes.plot([wavelength, wavelength],
                         flux_limits,
                         '--',
                         color='k')
            # Write text
            text_location = flux_limits[1] - 0.05 * (flux_limits[1] -
                                                     flux_limits[0])
            my_axes.text(wavelength,
                         text_location,
                         label,
                         horizontalalignment='right',
                         verticalalignment='top',
                         rotation='vertical')

    mylog.info("Writing spectrum plot to png file: %s" % filename)
    canvas = FigureCanvasAgg(figure)
    canvas.print_figure(filename)
    return figure
Example #49
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.
        """
        self.domain_left_edge = np.zeros(3, dtype='float')
        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
        self.dimensionality = 3
        self.refine_by = 2
        self.periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.unique_identifier = \
            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
        self.parameters.update(constants)
        self.parameters['Time'] = 1.0
        self.file_count = 1
        self.filename_template = self.parameter_filename

        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        assert self._file_particle_header
        with open(self._file_particle_header, "rb") as fh:
            seek = 4
            fh.seek(seek)
            headerstr = np.fromfile(fh, count=1, dtype='S45')
            aexpn = np.fromfile(fh, count=1, dtype='>f4')
            aexp0 = np.fromfile(fh, count=1, dtype='>f4')
            amplt = np.fromfile(fh, count=1, dtype='>f4')
            astep = np.fromfile(fh, count=1, dtype='>f4')
            istep = np.fromfile(fh, count=1, dtype='>i4')
            partw = np.fromfile(fh, count=1, dtype='>f4')
            tintg = np.fromfile(fh, count=1, dtype='>f4')
            ekin = np.fromfile(fh, count=1, dtype='>f4')
            ekin1 = np.fromfile(fh, count=1, dtype='>f4')
            ekin2 = np.fromfile(fh, count=1, dtype='>f4')
            au0 = np.fromfile(fh, count=1, dtype='>f4')
            aeu0 = np.fromfile(fh, count=1, dtype='>f4')
            nrowc = np.fromfile(fh, count=1, dtype='>i4')
            ngridc = np.fromfile(fh, count=1, dtype='>i4')
            nspecs = np.fromfile(fh, count=1, dtype='>i4')
            nseed = np.fromfile(fh, count=1, dtype='>i4')
            Om0 = np.fromfile(fh, count=1, dtype='>f4')
            Oml0 = np.fromfile(fh, count=1, dtype='>f4')
            hubble = np.fromfile(fh, count=1, dtype='>f4')
            Wp5 = np.fromfile(fh, count=1, dtype='>f4')
            Ocurv = np.fromfile(fh, count=1, dtype='>f4')
            wspecies = np.fromfile(fh, count=10, dtype='>f4')
            lspecies = np.fromfile(fh, count=10, dtype='>i4')
            extras = np.fromfile(fh, count=79, dtype='>f4')
            boxsize = np.fromfile(fh, count=1, dtype='>f4')
        n = nspecs[0]
        particle_header_vals = {}
        tmp = np.array([headerstr, aexpn, aexp0, amplt, astep, istep,
            partw, tintg, ekin, ekin1, ekin2, au0, aeu0, nrowc, ngridc,
            nspecs, nseed, Om0, Oml0, hubble, Wp5, Ocurv, wspecies,
            lspecies, extras, boxsize])
        for i in range(len(tmp)):
            a1 = dmparticle_header_struct[0][i]
            a2 = dmparticle_header_struct[1][i]
            if a2 == 1:
                particle_header_vals[a1] = tmp[i][0]
            else:
                particle_header_vals[a1] = tmp[i][:a2]
        for specie in range(n):
            self.particle_types.append("specie%i" % specie)
        self.particle_types_raw = tuple(
            self.particle_types)
        ls_nonzero = np.diff(lspecies)[:n-1]
        ls_nonzero = np.append(lspecies[0], ls_nonzero)
        self.star_type = len(ls_nonzero)
        mylog.info("Discovered %i species of particles", len(ls_nonzero))
        mylog.info("Particle populations: "+'%9i '*len(ls_nonzero),
                   *ls_nonzero)
        for k, v in particle_header_vals.items():
            if k in self.parameters.keys():
                if not self.parameters[k] == v:
                    mylog.info(
                        "Inconsistent parameter %s %1.1e  %1.1e", k, v,
                        self.parameters[k])
            else:
                self.parameters[k] = v
        self.parameters_particles = particle_header_vals
        self.parameters.update(particle_header_vals)
        self.parameters['wspecies'] = wspecies[:n]
        self.parameters['lspecies'] = lspecies[:n]
        self.parameters['ng'] = self.parameters['Ngridc']
        self.parameters['ncell0'] = self.parameters['ng']**3
        self.parameters['boxh'] = self.parameters['boxsize']
        self.parameters['total_particles'] = ls_nonzero
        self.domain_dimensions = np.ones(3,
                        dtype='int64')*2 # NOT ng

        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
        self.omega_lambda = particle_header_vals['Oml0']
        self.omega_matter = particle_header_vals['Om0']
        self.hubble_constant = particle_header_vals['hubble']
        self.min_level = 0
        self.max_level = 0
#        self.min_level = particle_header_vals['min_level']
#        self.max_level = particle_header_vals['max_level']
#        if self.limit_level is not None:
#            self.max_level = min(
#                self.limit_level, particle_header_vals['max_level'])
#        if self.force_max_level is not None:
#            self.max_level = self.force_max_level
        self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
        self.parameters['t'] = a2b(self.parameters['aexpn'])
        self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
Example No. 50
    def parse_subset(self, subsets='all'):
        """
        Select multiple lines based on atom, ion state, identifier, and/or
        wavelength.  Once you've created a LineDatabase, you can subselect
        certain lines from it based on line characteristics.  This method is
        preferred over :meth:`~trident.LineDatabase.select_lines`.

        Will return the unique union of all lines matching the specified
        subsets from the :class:`~trident.LineDatabase`.

        **Parameters**

        :subsets: list of strings, optional

            List strings matching possible lines.  Strings can be of the
            form:
            * Atom - Examples: "H", "C", "Mg"
            * Ion - Examples: "H I", "H II", "C IV", "Mg II"
            * Line - Examples: "H I 1216", "C II 1336", "Mg II 1240"
            * Identifier - Examples: "Ly a", "Ly b"

            If set to 'all', selects **all** lines in
            :class:`~trident.LineDatabase`.
            Default: 'all'

        **Returns**

        :line subset: list of :class:`trident.Line` objects

            A list of the Lines that were selected

        **Example**

        >>> # Get a list of all lines of Carbon, Mg II and Lyman alpha
        >>> ldb = LineDatabase('lines.txt')
        >>> lines = ldb.parse_subset(['C', 'Mg II', 'H I 1216'])
        >>> print(lines)
        """
        # if all specified, then use all lines available
        if subsets == 'all':
            self.lines_subset = self.lines_all
            mylog.info("Using all %d available lines in '%s'." % \
                       (len(self.lines_all), self.input_file))
            return self.lines_subset
        if subsets is None:
            subsets = []
        if isinstance(subsets, str):
            subsets = [subsets]
        for val in subsets:
            # try to add line based on identifier
            new_lines = self.select_lines(identifier=val)
            if len(new_lines) > 0:
                self.lines_subset.extend(new_lines)
                continue
            val = val.split()
            if len(val) == 1:
                # add all lines associated with an element
                new_lines = self.select_lines(val[0])
                self.lines_subset.extend(new_lines)
                if len(new_lines) == 0:
                    mylog.info("No lines found in subset '%s'." % val[0])
            elif len(val) == 2:
                # add all lines associated with an ion
                new_lines = self.select_lines(val[0], val[1])
                self.lines_subset.extend(new_lines)
                if len(new_lines) == 0:
                    mylog.info("No lines found in subset '%s %s'." % \
                               (val[0], val[1]))
            elif len(val) == 3:
                # add only one line
                new_lines = self.select_lines(val[0], val[1], val[2])
                self.lines_subset.extend(new_lines)
                if len(new_lines) == 0:
                    mylog.info("No lines found in subset '%s %s %s'." %
                               (val[0], val[1], val[2]))

        # Get rid of duplicates in subset and re-sort
        self.lines_subset = uniquify(self.lines_subset)
        return self.lines_subset
Example No. 51
    def __getitem__(self, item):
        if item in self.data:
            return self.data[item]

        mylog.info("Splatting (%s) onto a %d by %d mesh" %
                (item, self.buff_size[0], self.buff_size[1]))

        bounds = []
        for b in self.bounds:
            if hasattr(b, "in_units"):
                b = float(b.in_units("code_length"))
            bounds.append(b)

        ftype = item[0]
        x_data = self.data_source.dd[ftype, self.x_field]
        y_data = self.data_source.dd[ftype, self.y_field]
        data = self.data_source.dd[item]

        # handle periodicity
        dx = x_data.in_units("code_length").d - bounds[0]
        dy = y_data.in_units("code_length").d - bounds[2]
        if self.periodic:
            dx %= float(self._period[0].in_units("code_length"))
            dy %= float(self._period[1].in_units("code_length"))

        # convert to pixels
        px = dx / (bounds[1] - bounds[0])
        py = dy / (bounds[3] - bounds[2])
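        # px and py are now fractional image coordinates in [0, 1];
        # e.g. a particle at the left edge of the x bounds maps to px = 0.0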

        # select only the particles that will actually show up in the image
        mask = np.logical_and(np.logical_and(px >= 0.0, px <= 1.0),
                              np.logical_and(py >= 0.0, py <= 1.0))

        weight_field = self.data_source.weight_field
        if weight_field is None:
            weight_data = np.ones_like(data.v)
        else:
            weight_data = self.data_source.dd[weight_field]
        splat_vals = weight_data[mask]*data[mask]

        # splat particles
        buff = np.zeros(self.buff_size)
        buff_mask = np.zeros(self.buff_size).astype('int')
        add_points_to_greyscale_image(buff,
                                      buff_mask,
                                      px[mask],
                                      py[mask],
                                      splat_vals)
        # remove values in no-particle region
        buff[buff_mask==0] = np.nan
        ia = ImageArray(buff, units=data.units,
                        info=self._get_info(item))

        # divide by the weight_field, if needed
        if weight_field is not None:
            weight_buff = np.zeros(self.buff_size)
            weight_buff_mask = np.zeros(self.buff_size).astype('int')
            add_points_to_greyscale_image(weight_buff,
                                          weight_buff_mask,
                                          px[mask],
                                          py[mask],
                                          weight_data[mask])
            weight_array = ImageArray(weight_buff,
                                      units=weight_data.units,
                                      info=self._get_info(item))
            # remove values in no-particle region
            weight_buff[weight_buff_mask==0] = np.nan
            locs = np.where(weight_array > 0)
            ia[locs] /= weight_array[locs]

        self.data[item] = ia
        return self.data[item]
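# A minimal usage sketch (hedged): this __getitem__ belongs to a
# fixed-resolution particle buffer; the buffer name and field tuple below are
# illustrative assumptions, not names taken from this snippet.
# buf[('all', 'particle_mass')]  # splats particles onto the mesh; returns an ImageArray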
Example No. 52
def write_projection(
    data,
    filename,
    colorbar=True,
    colorbar_label=None,
    title=None,
    vmin=None,
    vmax=None,
    limits=None,
    take_log=True,
    figsize=(8, 6),
    dpi=100,
    cmap_name=None,
    extent=None,
    xlabel=None,
    ylabel=None,
):
    r"""Write a projection or volume rendering to disk with a variety of
    pretty parameters such as limits, title, colorbar, etc.  write_projection
    uses the standard matplotlib interface to create the figure.  N.B. This code
    only works *after* you have created the projection using the standard
    framework (i.e. the Camera interface or off_axis_projection).

    Accepts an NxM sized array representing the projection itself as well
    as the filename to which you will save this figure.  Note that the final
    resolution of your image will be figsize * dpi pixels (e.g. an 8x6 inch
    figure at dpi=100 yields an 800x600 pixel image).

    Parameters
    ----------
    data : array_like
        image array as output by off_axis_projection or camera.snapshot()
    filename : string
        the filename where the data will be saved
    colorbar : boolean
        do you want a colorbar generated to the right of the image?
    colorbar_label : string
        the label associated with your colorbar
    title : string
        the label at the top of the figure
    vmin : float or None
        the lower limit of the zaxis (part of matplotlib api)
    vmax : float or None
        the upper limit of the zaxis (part of matplotlib api)
    take_log : boolean
        plot the log of the data array (and take the log of the limits if set)?
    figsize : array_like
        width, height in inches of final image
    dpi : int
        final image resolution in pixels / inch
    cmap_name : string
        The name of the colormap; defaults to the "default_colormap" entry in
        the yt configuration.
    limits : tuple of floats or None
        deprecated alias for (vmin, vmax); use vmin and vmax instead
    extent : array_like or None
        the (left, right, bottom, top) data extent passed to matplotlib's
        imshow; if None, the x and y pixel ticks are suppressed
    xlabel : string
        the label for the x axis
    ylabel : string
        the label for the y axis

    Examples
    --------

    >>> image = off_axis_projection(ds, c, L, W, N, "Density", no_ghost=False)
    >>> write_projection(image, 'test.png',
                         colorbar_label="Column Density (cm$^{-2}$)",
                         title="Offaxis Projection", vmin=1e-5, vmax=1e-3,
                         take_log=True)
    """
    if cmap_name is None:
        cmap_name = ytcfg.get("yt", "default_colormap")
    import matplotlib.colors
    import matplotlib.figure

    from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS

    if limits is not None:
        if vmin is not None or vmax is not None:
            raise ValueError(
                "The `limits` keyword argument is deprecated and can not "
                "be used simultaneously with `vmin` or `vmax`.")
        issue_deprecation_warning(
            "The `limits` keyword argument is deprecated and will "
            "be removed in a future version of yt. Use `vmin` and `vmax` instead.",
            since="4.0.0",
            removal="4.1.0",
        )
        vmin, vmax = limits

    # If this is rendered as log, then apply now.
    if take_log:
        norm_cls = matplotlib.colors.LogNorm
    else:
        norm_cls = matplotlib.colors.Normalize
    norm = norm_cls(vmin=vmin, vmax=vmax)

    # Create the figure and paint the data on
    fig = matplotlib.figure.Figure(figsize=figsize)
    ax = fig.add_subplot(111)

    cax = ax.imshow(
        data.to_ndarray(),
        norm=norm,
        extent=extent,
        cmap=cmap_name,
    )

    if title:
        ax.set_title(title)

    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)

    # Suppress the x and y pixel counts
    if extent is None:
        ax.set_xticks(())
        ax.set_yticks(())

    # Add a color bar and label if requested
    if colorbar:
        cbar = fig.colorbar(cax)
        if colorbar_label:
            cbar.ax.set_ylabel(colorbar_label)

    suffix = get_image_suffix(filename)

    if suffix == "":
        suffix = ".png"
        filename = f"{filename}{suffix}"
    mylog.info("Saving plot %s", filename)
    if suffix == ".pdf":
        canvas = FigureCanvasPdf(fig)
    elif suffix in (".eps", ".ps"):
        canvas = FigureCanvasPS(fig)
    else:
        canvas = FigureCanvasAgg(fig)

    fig.tight_layout()

    canvas.print_figure(filename, dpi=dpi)
    return filename
def write_synchrotron_hdf5(ds,
                           write_fields,
                           sanitize_fieldnames=False,
                           extend_cells=None):
    """
    Calculate the emissivity of Stokes I, Q, and U in each cell. Write them
    to a new HDF5 file and copy metadata from the original HDF5 files.
    The new HDF5 file can then be loaded into yt for plotting.
    """
    # The new file name that we are going to write to
    sfname = synchrotron_filename(ds, extend_cells=extend_cells)

    h5_handle = ds._handle

    # Keep a list of the fields that were in the original hdf5 file
    orig_field_list = [
        field.decode() for field in h5_handle['unknown names'].value[:, 0]
    ]

    if not isinstance(write_fields, list):
        write_fields = [write_fields]

    comm = communication_system.communicators[-1]
    # Only do the IO in the master process
    if comm.rank == 0:
        exist_fields = []
        written_fields = []
        if os.path.isfile(sfname):
            with h5py.File(sfname, 'r') as h5file:
                # First check if the fields already exist
                if 'unknown names' in h5file.keys():
                    exist_fields = [
                        f.decode('utf8') for f in h5file['unknown names'].value
                    ]
                for field in write_fields.copy():
                    #TODO: Check if the data are the same
                    if field in exist_fields and field in h5file.keys():
                        write_fields.remove(field)
                    elif field in h5file.keys():
                        # Some fields are not recorded in 'unknown names'
                        # but already exist in the dataset
                        write_fields.remove(field)
                        exist_fields.append(field)
                        sanitize_fieldnames = True
                mylog.info('Closing File: %s', h5file)
            mylog.info('Fields already exist: %s', sorted(exist_fields))
        else:
            # File does not exist
            pass

    # On all processes
    write_fields = comm.mpi_bcast(write_fields, root=0)

    if write_fields:
        mylog.info('Fields to be generated: %s', write_fields)
        if comm.rank == 0:
            wdata = {}
        for field in write_fields:
            mylog.debug('Preparing field: %s', field)
            # Here we do the actual calculation (in yt) and save the grid data
            data = prep_field_data(ds, field)
            # On master process only
            if comm.rank == 0:
                wdata[field] = data

        # On master process only
        if comm.rank == 0:
            with h5py.File(sfname, 'a') as h5file:
                mylog.info('Writing to %s', h5file)
                mylog.info('Fields to be written: %s', write_fields)
                for field in write_fields:
                    # Writing data to HDF5 file
                    mylog.info('Writing field: %s', field)
                    h5file.create_dataset(field, wdata[field].shape,
                                          wdata[field].dtype, wdata[field])
                    written_fields.append(field)
                del wdata

                # Go through all the items in the FLASH hdf5 file
                for name, v in h5_handle.items():
                    if name in itertools.chain(orig_field_list, h5file.keys()):
                        # Do not rewrite fields that were present in the FLASH hdf5 file
                        # or metadata that has already been copied into the destination hdf5 file
                        pass
                    elif name == 'unknown names':
                        pass
                    else:
                        # Keep other simulation information
                        h5file.create_dataset(v.name, v.shape, v.dtype,
                                              v.value)

    if write_fields or sanitize_fieldnames:
        if comm.rank == 0:
            with h5py.File(sfname, 'a') as h5file:
                # Add the new field names that have already been written
                all_fields = list(set(written_fields + exist_fields))
                # Remove fields that do not exist in the dataset
                # (filter into a new list; removing while iterating skips items)
                all_fields = [f for f in all_fields if f in h5file.keys()]
                # We need to encode the field name to binary format
                bnames = [f.encode('utf8') for f in all_fields]
                # Create a new dataset for the field names
                if 'unknown names' in h5file.keys():
                    del h5file['unknown names']
                h5file.create_dataset('unknown names', data=bnames)
                mylog.info('Field List: %s', sorted(bnames))
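# A minimal usage sketch (hedged): the dataset path and field name below are
# illustrative assumptions, not values taken from this snippet.
# ds = yt.load('MHD_Jet_hdf5_plt_cnt_0620')
# write_synchrotron_hdf5(ds, ['lobe_nnw_sync_spec_1.4GHz'], extend_cells=32)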
Example No. 54
    def _set_code_unit_attributes(self):
        # required method
        # devnote: this method is never defined in the parent abstract class Dataset
        # but it is called in Dataset.set_code_units(), which is part of Dataset.__init__()
        # so it must be defined here.

        # devnote: this gets called later than Dataset._override_code_units()
        # This is the reason why it uses setdefaultattr: it will only fill in the gaps left
        # by the "override", instead of overriding them again.
        # For the same reason, self.units_override is set, as well as corresponding *_unit instance attributes
        # which may include up to 3 of the following items: length, time, mass, velocity, number_density, temperature

        # note: yt sets hydrogen mass equal to proton mass, amrvac doesn't.
        mp_cgs = self.quan(1.672621898e-24, 'g')  # This value is taken from AstroPy
        He_abundance = 0.1  # hardcoded parameter in AMRVAC

        # get self.length_unit if overrides are supplied, otherwise use default
        length_unit = getattr(self, 'length_unit', self.quan(1, 'cm'))

        # 1. calculations for mass, density, numberdensity
        if 'mass_unit' in self.units_override:
            # in this case unit_mass is supplied (and has been set as attribute)
            mass_unit = self.mass_unit
            density_unit = mass_unit / length_unit**3
            numberdensity_unit = density_unit / (
                (1.0 + 4.0 * He_abundance) * mp_cgs)
        else:
            # other case: numberdensity is supplied. Fall back to one (default) if no overrides supplied
            numberdensity_override = self.units_override.get(
                'numberdensity_unit', (1, 'cm**-3'))
            if 'numberdensity_unit' in self.units_override:  # log a message like yt's other unit overrides
                mylog.info("Overriding numberdensity_unit: %g %s.",
                           *numberdensity_override)
            numberdensity_unit = self.quan(
                *numberdensity_override
            )  # numberdensity is never set as attribute
            density_unit = (1.0 +
                            4.0 * He_abundance) * mp_cgs * numberdensity_unit
            mass_unit = density_unit * length_unit**3

        # 2. calculations for velocity
        if 'time_unit' in self.units_override:
            # in this case time was supplied
            velocity_unit = length_unit / self.time_unit
        else:
            # other case: velocity was supplied. Fall back to None if no overrides supplied
            velocity_unit = getattr(self, 'velocity_unit', None)

        # 3. calculations for pressure and temperature
        if velocity_unit is None:
            # velocity and time not given, see if temperature is given. Fall back to one (default) if not
            temperature_unit = getattr(self, 'temperature_unit',
                                       self.quan(1, 'K'))
            pressure_unit = ((2.0 + 3.0 * He_abundance) * numberdensity_unit *
                             kb_cgs * temperature_unit).in_cgs()
            velocity_unit = (np.sqrt(pressure_unit / density_unit)).in_cgs()
        else:
            # velocity is not zero if either time was given OR velocity was given
            pressure_unit = (density_unit * velocity_unit**2).in_cgs()
            temperature_unit = (pressure_unit /
                                ((2.0 + 3.0 * He_abundance) *
                                 numberdensity_unit * kb_cgs)).in_cgs()

        # 4. calculations for magnetic unit and time
        # if time was given use it, otherwise derive it from length and velocity
        time_unit = getattr(self, 'time_unit', length_unit / velocity_unit)
        magnetic_unit = (np.sqrt(4 * np.pi * pressure_unit)).to('gauss')

        setdefaultattr(self, 'mass_unit', mass_unit)
        setdefaultattr(self, 'density_unit', density_unit)
        setdefaultattr(self, 'numberdensity_unit', numberdensity_unit)

        setdefaultattr(self, 'length_unit', length_unit)
        setdefaultattr(self, 'velocity_unit', velocity_unit)
        setdefaultattr(self, 'time_unit', time_unit)

        setdefaultattr(self, 'temperature_unit', temperature_unit)
        setdefaultattr(self, 'pressure_unit', pressure_unit)
        setdefaultattr(self, 'magnetic_unit', magnetic_unit)
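        # Worked example (hedged): with no overrides, length_unit = 1 cm and
        # numberdensity_unit = 1 cm**-3, so with He_abundance = 0.1:
        #   density_unit  = 1.4 * m_p             ~ 2.34e-24 g*cm**-3
        #   pressure_unit = 2.3 * n * k_B * (1 K) ~ 3.18e-16 dyn*cm**-2
        #   velocity_unit = sqrt(p / rho)         ~ 1.16e4 cm/s
        #   magnetic_unit = sqrt(4*pi*p)          ~ 6.3e-8 gauss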
def add_synchrotron_dtau_emissivity(ds,
                                    ptype='lobe',
                                    nu=(1.4, 'GHz'),
                                    method='nearest_weighted',
                                    proj_axis='x',
                                    extend_cells=32):
    me = yt.utilities.physical_constants.mass_electron  #9.109E-28
    c = yt.utilities.physical_constants.speed_of_light  #2.998E10
    e = yt.utilities.physical_constants.elementary_charge  #4.803E-10 esu

    gamma_min = yt.YTQuantity(10, 'dimensionless')
    # Index for electron power law distribution
    p = 2.0
    pol_ratio = (p + 1.) / (p + 7. / 3.)
    # Fitted constants for the approximated power-law + exponential spectra
    # Integral of 2*F(x) -> tot_const*(nu**-2)*exp(-nu/nuc)
    # 2*F(x) for the total intensity (parallel + perpendicular)
    tot_const = 4.1648
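    # For p = 2.0 this gives pol_ratio = 3 / (13/3) = 9/13 ~ 0.69, the standard
    # maximum fractional polarization for a power-law synchrotron spectrum.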

    stokes = StokesFieldName(ptype, nu, proj_axis)
    nu = yt.YTQuantity(*nu)

    if proj_axis == 'x':
        los = [1., 0., 0.]
        xvec = [0., 1., 0.]
        yvec = [0., 0., 1.]
    elif proj_axis == 'y':
        los = [0., 1., 0.]
        xvec = [0., 0., 1.]
        yvec = [1., 0., 0.]
    elif proj_axis == 'z':
        los = [0., 0., 1.]
        xvec = [1., 0., 0.]
        yvec = [0., 1., 0.]
    elif isinstance(proj_axis, list):
        los = proj_axis
        if los[0] != 0.:  # los has a nonzero x-component
            xvec = [0., 1., 0.]
            yvec = [0., 0., 1.]
        # TODO: xvec and yvec for arbitrary proj_axis
        else:
            raise NotImplementedError

    else:
        raise NotImplementedError
    los = np.array(los)
    xvec = np.array(xvec)
    yvec = np.array(yvec)
    los = los / np.sqrt(np.sum(los * los))

    # Determine the version of the simulation
    if ('io', 'particle_tau1') in ds.field_list:
        version = 2018
        mylog.info(
            'Field particle_tau1 detected. This is a version 2018 simulation.')
    elif ('io', 'particle_type') in ds.field_list:
        version = 2016
        mylog.info(
            'Field particle_type detected. This is a version 2016 simulation.')
    else:
        version = 2015
        # Update the particle file handler in yt; raise exception if not successful
        success = setup_part_file(ds)
        mylog.info('Assuming this is a version 2015 simulation.')
        if not success:
            raise IOError

    if ('gas', 'jet_volume_fraction') not in ds.derived_field_list:
        ds.add_field(('gas', 'jet_volume_fraction'),
                     function=_jet_volume_fraction,
                     display_name="Jet Volume Fraction",
                     sampling_type='cell')

    #def _gamc(field, data):
    #    # The new cutoff gamma
    #    # Note that particle_dens could be negative due to quadratic interpolation!
    #    gamc = (np.abs(data['particle_dens'] / data['particle_den0']))**(1./3.) \
    #           / (data['particle_dtau'] + np.finfo(np.float64).tiny)
    #    ind = np.where(gamc < 0.0)[0]
    #    if ind.shape[0] > 0:
    #        print(ind)
    #        print(gamc)

    #    return gamc

    #pfname = 'particle_gamc_dtau'
    #ds.add_field(pfname, function=_gamc, sampling_type='particle',
    #             units='', force_override=True)
    #deposit_field = 'particle_gamc_dtau'

    def _synchrotron_spec(field, data):
        # To convert from FLASH "none" unit to cgs unit, times the B field from FLASH by sqrt(4*pi)
        Bvec = np.array([data['particle_magx'],\
                         data['particle_magy'],\
                         data['particle_magz']])*np.sqrt(4.0*np.pi)
        Bvec = data.apply_units(Bvec, 'gauss')

        # Calculate sin(a), in which a is the pitch angle of the electrons relative to B field.
        # See _nn_emissivity_i for more comments
        cross = np.cross(los, Bvec, axisb=0)
        Bsina = np.sqrt(np.sum(cross * cross, axis=-1))
        Bsina = data.apply_units(Bsina, 'gauss')
        #B = np.sqrt(np.sum(Bvec*Bvec, axis=0))

        if version == 2018:
            # Return for the FieldDetector; do nothing
            if isinstance(data, FieldDetector):
                return data['particle_dens']/data['particle_den0']**(1./3.)/ \
                        (data['particle_tau1']+data['particle_cmb1'])

            gamc = (np.abs(data['particle_dens'] / data['particle_den0']))**(1./3.) \
                   / (data['particle_tau1'] + data['particle_cmb1'] + np.finfo(np.float64).tiny)
        else:
            # Return for the FieldDetector; do nothing
            if isinstance(data, FieldDetector):
                return data['particle_dens']/data['particle_den0']**(1./3.)/ \
                        (data['particle_dtau'])

            if np.any(data['particle_dtau'] < 0.0):
                print('negative tau!')
                print(data)
                print(data['particle_tau'])
                print(data['particle_dtau'])

            # The new cutoff gamma
            # Note that particle_dens could be negative due to quadratic interpolation!
            gamc = (np.abs(data['particle_dens'] / data['particle_den0']))**(1./3.) \
                   / (data['particle_dtau'] + np.finfo(np.float64).tiny)

        ind = np.where(gamc <= 0.0)[0]
        if ind.shape[0] > 0:
            print(ind)
            print(gamc[ind])

        #gamc = data[(ptype, 'particle_gamc')]

        # Cutoff frequency
        nuc = 3.0 * gamc**2 * e * Bsina / (4.0 * np.pi * me * c)
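        # This is the standard synchrotron critical frequency; e.g. for
        # gamc ~ 1e4 and B*sin(a) ~ 10 uG, nuc ~ 4.2 GHz, so the exponential
        # cutoff matters at typical radio frequencies.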
        #nu = data.get_field_parameter("frequency", default=yt.YTQuantity(1.4, 'GHz'))

        # B**1.5 is taken from the grid data
        norm = 3.0 / 8.0 * e**3.5 / (c**2.5 * me**1.5 * (np.pi)**0.5)
        # P is taken from the grid data
        N0 = 3.0 / me / c / c / (np.log(gamc / gamma_min)) / yt.YTQuantity(
            4. * np.pi, 'sr')

        # Fix where the cutoff gamma < 0
        N0[ind] = 0.0

        return np.clip(N0 * norm * nu**(-0.5) * np.exp(-nu / nuc),
                       np.finfo(np.float64).tiny, None)

    # particle field name
    pfname = 'particle_sync_spec_%s' % stokes.nu_str
    ds.add_field(pfname,
                 function=_synchrotron_spec,
                 sampling_type='particle',
                 units='cm**(3/4)*s**(3/2)/g**(3/4)/sr',
                 force_override=True)
    deposit_field = pfname

    #try:
    ds.add_particle_filter(ptype)
    #except:
    #    raise NotImplementedError

    ###########################################################################
    ## Nearest Neighbor method
    ###########################################################################
    #fname_nn = ds.add_deposited_particle_field(
    #        (ptype, 'particle_sync_spec_%s' % stokes.nu_str), 'nearest', extend_cells=extend_cells)

    sync_unit = ds.field_info[deposit_field].units
    if method == "nearest":
        field_name = "%s_nn_%s"
    elif method == "nearest_weighted":
        field_name = "%s_nnw_%s"
    else:
        raise NotImplementedError
    field_name = field_name % (ptype, deposit_field.replace('particle_', ''))
    ad = ds.all_data()

    #print(ad[ptype, "particle_position"])

    def _nnw_deposit_field(field, data):
        if isinstance(data, FieldDetector):
            jetfluid = data['velocity_magnitude'] > lobe_v
            d = data.deposit(data[ptype, "particle_position"],
                             (ptype, deposit_field),
                             method=method)
            d[jetfluid] = 0.0
            return d
        #pos = ad[ptype, "particle_position"]
        if ptype == 'lobe':
            # Calling other fields must precede the more intensive
            # deposit function to prevent repeated iterations
            jetfluid = data['velocity_magnitude'] > lobe_v
        alldata = True
        if alldata:
            pos = ad[ptype, "particle_position"]
            # Deposit using the distance weighted log field
            #fields = [np.log(ad[ptype, deposit_field])]
            fields = [ad[ptype, deposit_field]]
            fields = [np.ascontiguousarray(f) for f in fields]
        else:
            left_edge = np.maximum(data.LeftEdge-data.dds*extend_cells,\
                                   data.ds.domain_left_edge)
            right_edge = np.minimum(data.RightEdge+data.dds*extend_cells,\
                                    data.ds.domain_right_edge)
            box = data.ds.box(left_edge, right_edge)
            pos = box[ptype, "particle_position"]
            fields = [box[ptype, deposit_field]]
        d = data.deposit(pos, fields, method=method, extend_cells=extend_cells)
        if np.all(d == 0.0):
            d = data.ds.arr(d, input_units=sync_unit)
        else:
            # Convert the log back to the real value
            #d = data.ds.arr(np.exp(d), input_units=sync_unit)
            d = data.ds.arr(d, input_units=sync_unit)
        if ptype == 'lobe':
            d[jetfluid] = 0.0
        return d

    fname_nn = ("deposit", field_name)
    ds.add_field(fname_nn,
                 function=_nnw_deposit_field,
                 sampling_type="cell",
                 units=sync_unit,
                 take_log=True,
                 force_override=True,
                 validators=[ValidateSpatial()])

    def _nn_emissivity_i(field, data):
        '''
        Emissivity using nearest neighbor. Integrate over line of sight to get intensity.
        '''
        Bvec = np.array([data[('gas', 'magnetic_field_x')],\
                         data[('gas', 'magnetic_field_y')],\
                         data[('gas', 'magnetic_field_z')]])

        # Calculate sin(a), in which a is the pitch angle of the electrons relative to B field.
        # We only see the radiation from electrons with pitch angles pointing to line of sight.
        cross = np.cross(los, Bvec, axisb=0)
        # B * sin(alpha) = (B * |(los x Bvec)|/|los|/|Bvec|)
        # = |(los x Bvec)|
        Bsina = np.sqrt(np.sum(cross * cross, axis=-1))
        Bsina = data.apply_units(Bsina, 'gauss')

        # P * (B*sina)^1.5
        PBsina = data['gas', 'pressure'] * Bsina**1.5

        frac = data['gas', 'jet_volume_fraction']

        # Use gamc from deposit field
        #############################
        # fname_nn = 'ptype_nnw_gamc_dtau'
        #if ('flash', fname_nn[1]) in data.ds.field_list:
        #    gamc = data[('flash', fname_nn[1])]
        #else:
        #    gamc = data[fname_nn]
        #bad_mask = gamc <= gamma_min
        ## Cutoff frequency
        #nuc = 3.0*gamc**2*e*Bsina/(4.0*np.pi*me*c)
        ##nu = data.get_field_parameter("frequency", default=yt.YTQuantity(1.4, 'GHz'))

        #norm = 3.0*Bsina**1.5/8.0*e**3.5/(c**2.5*me**1.5*(np.pi)**0.5)
        #N0 = 3.0*data['gas', 'pressure']/me/c/c/(np.log(gamc/gamma_min))/yt.YTQuantity(4.*np.pi, 'sr')
        #N0[bad_mask] = 0.0

        return PBsina * frac * tot_const * data[fname_nn]

    ds.add_field(stokes.I,
                 function=_nn_emissivity_i,
                 sampling_type='cell',
                 display_name=stokes.display_name('I'),
                 units='Jy/cm/arcsec**2',
                 take_log=True,
                 force_override=True,
                 validators=[ValidateSpatial()])

    def _cos_theta(field, data):
        Bvec = np.stack([data[('gas', 'magnetic_field_x')],\
                         data[('gas', 'magnetic_field_y')],\
                         data[('gas', 'magnetic_field_z')]], axis=-1)
        Bproj = Bvec - np.expand_dims(np.inner(Bvec, los), -1) * los
        # cos = cos(theta), theta is the angle between projected B and xvec
        # Ignore invalid 0/0 warnings
        with np.errstate(invalid='ignore'):
            cos = np.inner(Bproj, xvec) / np.sqrt(
                np.sum(Bproj * Bproj, axis=-1))
        return cos

    ds.add_field('cos',
                 function=_cos_theta,
                 sampling_type='cell',
                 display_name='cos theta',
                 units='',
                 take_log=False,
                 force_override=True)

    def _nn_emissivity_q(field, data):
        # pol_ratio = (perp - para) / (perp + para)
        # The minus accounts for the perpendicular polarization
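        # 2*cos(theta)**2 - 1 = cos(2*theta), so Stokes Q varies as cos(2*theta)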
        rtn = -data[stokes.I] * pol_ratio * (2 * data['cos']**2 - 1.0)
        rtn[~np.isfinite(data['cos'])] = 0.0
        return rtn

    ds.add_field(stokes.Q,
                 function=_nn_emissivity_q,
                 sampling_type='cell',
                 display_name=stokes.display_name('Q'),
                 units='Jy/cm/arcsec**2',
                 take_log=False,
                 force_override=True)

    def _nn_emissivity_u(field, data):
        sin = np.sqrt(1.0 - data['cos']**2)
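        # 2*sin(theta)*cos(theta) = sin(2*theta), so Stokes U varies as sin(2*theta)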
        rtn = -data[stokes.I] * pol_ratio * 2 * sin * data['cos']
        rtn[~np.isfinite(data['cos'])] = 0.0
        return rtn

    ds.add_field(stokes.U,
                 function=_nn_emissivity_u,
                 sampling_type='cell',
                 display_name=stokes.display_name('U'),
                 units='Jy/cm/arcsec**2',
                 take_log=False,
                 force_override=True)

    return pfname, fname_nn, stokes.I, stokes.nu_str
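# A minimal usage sketch (hedged): the dataset path below is an illustrative
# assumption, not a value taken from this snippet.
# ds = yt.load('MHD_Jet_hdf5_plt_cnt_0620')
# pfname, fname_nn, stokes_i, nu_str = add_synchrotron_dtau_emissivity(
#     ds, ptype='lobe', nu=(1.4, 'GHz'), proj_axis='x')
# prj = yt.ProjectionPlot(ds, 'x', stokes_i)  # integrate the emissivity along x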