Example #1
0
 def print_key_parameters(self):
     if is_root():
         mylog.info("YTProfileDataset")
         for a in ["dimensionality", "profile_dimensions"] + [
                 f"{ax}_{attr}" for ax in "xyz"[:self.dimensionality]
                 for attr in ["field", "range", "log"]
         ]:
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
     super().print_key_parameters()
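For reference, the list comprehension above expands to a fixed set of per-axis attribute names; a quick sketch of what gets logged for a two-dimensional profile:

dimensionality = 2
names = ["dimensionality", "profile_dimensions"] + [
    f"{ax}_{attr}" for ax in "xyz"[:dimensionality]
    for attr in ["field", "range", "log"]
]
# ['dimensionality', 'profile_dimensions',
#  'x_field', 'x_range', 'x_log', 'y_field', 'y_range', 'y_log']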
Example #2
0
 def __init__(self,
              ts,
              num_readers=1,
              num_writers=None,
              outbase="rockstar_halos",
              particle_type="all",
              force_res=None,
              total_particles=None,
              dm_only=False,
              particle_mass=None,
              min_halo_size=25):
     if is_root():
         mylog.info(
             "The citation for the Rockstar halo finder can be found at")
         mylog.info("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
     ParallelAnalysisInterface.__init__(self)
     # Decide how we're working.
     if ytcfg.getboolean("yt", "inline") == True:
         self.runner = InlineRunner()
     else:
         self.runner = StandardRunner(num_readers, num_writers)
     self.num_readers = self.runner.num_readers
     self.num_writers = self.runner.num_writers
     mylog.info("Rockstar is using %d readers and %d writers",
                self.num_readers, self.num_writers)
     # Note that Rockstar does not support subvolumes.
     # We assume that all of the snapshots in the time series
     # use the same domain info as the first snapshot.
     if not isinstance(ts, DatasetSeries):
         ts = DatasetSeries([ts])
     self.ts = ts
     self.particle_type = particle_type
     self.outbase = outbase
     self.min_halo_size = min_halo_size
     if force_res is None:
         tds = ts[-1]  # Cache a reference
         self.force_res = tds.index.get_smallest_dx().in_units("Mpc/h")
         # We have to delete now to wipe the index
         del tds
     else:
         self.force_res = force_res
     self.total_particles = total_particles
     self.dm_only = dm_only
     self.particle_mass = particle_mass
     # Set up the pool and workgroups.
     self.pool, self.workgroup = self.runner.setup_pool()
     p = self._setup_parameters(ts)
     params = self.comm.mpi_bcast(p, root=self.pool['readers'].ranks[0])
     self.__dict__.update(params)
     self.handler = rockstar_interface.RockstarInterface(self.ts)
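For context, a hedged usage sketch: assuming this constructor is yt's RockstarHaloFinder (the import path below is from older yt versions; newer releases ship it in yt_astro_analysis), a run might look like the following. The dataset glob is a placeholder, and the script must be launched under MPI with enough ranks for the readers, the writers, and the Rockstar server.

# Hedged sketch; import path and dataset pattern are placeholders.
import yt
from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder

ts = yt.DatasetSeries("DD????/DD????")   # hypothetical time series
rh = RockstarHaloFinder(ts, num_readers=1, num_writers=2,
                        outbase="rockstar_halos", particle_type="all")
rh.run()   # e.g. mpirun -np 4: 1 reader + 2 writers + 1 server rank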
Example #4
0
    def __init__(self,
                 ds,
                 normal,
                 field,
                 velocity_bounds,
                 center="c",
                 width=(1.0, "unitary"),
                 dims=100,
                 thermal_broad=False,
                 atomic_weight=56.,
                 depth=(1.0, "unitary"),
                 depth_res=256,
                 method="integrate",
                 weight_field=None,
                 no_shifting=False,
                 north_vector=None,
                 no_ghost=True):
        r""" Initialize a PPVCube object.

        Parameters
        ----------
        ds : dataset
            The dataset.
        normal : array_like or string
            The normal vector along which to make the projections. If an array, it
            will be normalized. If a string, it will be assumed to be along one of the
            principal axes of the domain ("x", "y", or "z").
        field : string
            The field to project.
        velocity_bounds : tuple
            A 4-tuple of (vmin, vmax, nbins, units) for the velocity bounds to
            integrate over. 
        center : A sequence of floats, a string, or a tuple.
            The coordinate of the center of the image. If set to 'c', 'center' or
            left blank, the plot is centered on the middle of the domain. If set to
            'max' or 'm', the center will be located at the maximum of the
            ('gas', 'density') field. Centering on the max or min of a specific
            field is supported by providing a tuple such as ("min","temperature") or
            ("max","dark_matter_density"). Units can be specified by passing in *center*
            as a tuple containing a coordinate and string unit name or by passing
            in a YTArray. If a list or unitless array is supplied, code units are
            assumed.
        width : float, tuple, or YTQuantity.
            The width of the projection. A float will assume the width is in code units.
            A (value, unit) tuple or YTQuantity allows for the units of the width to be
            specified. Implies width = height, i.e. the aspect ratio of the PPVCube's
            spatial dimensions is 1.
        dims : integer, optional
            The spatial resolution of the cube. Implies nx = ny, i.e. the
            aspect ratio of the PPVCube's spatial dimensions is 1.
        thermal_broad : boolean, optional
            Whether or not to broaden the line using the gas temperature. Default: False.
        atomic_weight : float, optional
            Set this value to the atomic weight of the particle that is emitting the line
            if *thermal_broad* is True. Defaults to 56 (Fe).
        depth : A tuple or a float, optional
            A tuple containing the depth to project through and the string
            key of the unit: (width, 'unit').  If set to a float, code units
            are assumed. Only for off-axis cubes.
        depth_res : integer, optional
            The resolution of integration along the line of sight for off-axis cubes. Default: 256
        method : string, optional
            Set the projection method to be used.
            "integrate" : line of sight integration over the line element.
            "sum" : straight summation over the line of sight.
        weight_field : string, optional
            The name of the weighting field.  Set to None for no weight.
        no_shifting : boolean, optional
            If set, no shifting due to the line-of-sight velocity is applied and
            only thermal broadening occurs. Cannot be set when *thermal_broad* is
            False; a RuntimeError is raised in that case. Default: False
        north_vector : a sequence of floats
            A vector defining the 'up' direction. This option sets the orientation of 
            the plane of projection. If not set, an arbitrary grid-aligned north_vector 
            is chosen. Ignored in the case of on-axis cubes.
        no_ghost : bool, optional
            Optimization option for off-axis cases. If True, homogenized bricks
            will extrapolate out from the grid instead of interpolating from
            ghost zones, which have to be calculated first. This can lead to
            large speed improvements, but at a loss of accuracy/smoothness in
            the resulting image. The effects are less notable when the transfer
            function is smooth and broad. Default: True

        Examples
        --------
        >>> i = 60*np.pi/180.
        >>> L = [0.0,np.sin(i),np.cos(i)]
        >>> cube = PPVCube(ds, L, "density", (-5.,4.,100,"km/s"), width=(10.,"kpc"))
        """

        self.ds = ds
        self.field = field
        self.width = width
        self.particle_mass = atomic_weight * mh
        self.thermal_broad = thermal_broad
        self.no_shifting = no_shifting

        if not isinstance(normal, string_types):
            width = ds.coordinates.sanitize_width(normal, width, depth)
            width = tuple(el.in_units('code_length').v for el in width)

        if no_shifting and not thermal_broad:
            raise RuntimeError(
                "no_shifting cannot be True when thermal_broad is False!")

        self.center = ds.coordinates.sanitize_center(center, normal)[0]

        self.nx = dims
        self.ny = dims
        self.nv = velocity_bounds[2]

        if method not in ["integrate", "sum"]:
            raise RuntimeError("Only the 'integrate' and 'sum' projection +"
                               "methods are supported in PPVCube.")

        dd = ds.all_data()
        fd = dd._determine_fields(field)[0]
        self.field_units = ds._get_field_info(fd).units

        self.vbins = ds.arr(
            np.linspace(velocity_bounds[0], velocity_bounds[1],
                        velocity_bounds[2] + 1), velocity_bounds[3])

        self._vbins = self.vbins.copy()
        self.vmid = 0.5 * (self.vbins[1:] + self.vbins[:-1])
        self.vmid_cgs = self.vmid.in_cgs().v
        self.dv = self.vbins[1] - self.vbins[0]
        self.dv_cgs = self.dv.in_cgs().v

        self.current_v = 0.0

        _vlos = create_vlos(normal, self.no_shifting)
        self.ds.add_field(("gas", "v_los"), function=_vlos, units="cm/s")

        _intensity = self._create_intensity()
        self.ds.add_field(("gas", "intensity"),
                          function=_intensity,
                          units=self.field_units)

        if method == "integrate" and weight_field is None:
            self.proj_units = str(ds.quan(1.0, self.field_units + "*cm").units)
        elif method == "sum":
            self.proj_units = self.field_units

        storage = {}
        pbar = get_pbar("Generating cube.", self.nv)
        for sto, i in parallel_objects(range(self.nv), storage=storage):
            self.current_v = self.vmid_cgs[i]
            if isinstance(normal, string_types):
                prj = ds.proj("intensity",
                              ds.coordinates.axis_id[normal],
                              method=method,
                              weight_field=weight_field)
                buf = prj.to_frb(width, self.nx,
                                 center=self.center)["intensity"]
            else:
                buf = off_axis_projection(ds,
                                          self.center,
                                          normal,
                                          width, (self.nx, self.ny, depth_res),
                                          "intensity",
                                          north_vector=north_vector,
                                          no_ghost=no_ghost,
                                          method=method,
                                          weight=weight_field).swapaxes(0, 1)
            sto.result_id = i
            sto.result = buf
            pbar.update(i)
        pbar.finish()

        self.data = ds.arr(np.zeros((self.nx, self.ny, self.nv)),
                           self.proj_units)
        if is_root():
            for i, buf in sorted(storage.items()):
                self.data[:, :, i] = buf.transpose()

        self.axis_type = "velocity"

        # Now fix the width
        if iterable(self.width):
            self.width = ds.quan(self.width[0], self.width[1])
        elif not isinstance(self.width, YTQuantity):
            self.width = ds.quan(self.width, "code_length")

        self.ds.field_info.pop(("gas", "intensity"))
        self.ds.field_info.pop(("gas", "v_los"))
    def __init__(self, ds, normal, field, velocity_bounds, center="c", 
                 width=(1.0,"unitary"), dims=100, thermal_broad=False,
                 atomic_weight=56., depth=(1.0,"unitary"), depth_res=256,
                 method="integrate", weight_field=None, no_shifting=False,
                 north_vector=None, no_ghost=True):
        r""" Initialize a PPVCube object.

        Parameters
        ----------
        ds : dataset
            The dataset.
        normal : array_like or string
            The normal vector along with to make the projections. If an array, it
            will be normalized. If a string, it will be assumed to be along one of the
            principal axes of the domain ("x", "y", or "z").
        field : string
            The field to project.
        velocity_bounds : tuple
            A 4-tuple of (vmin, vmax, nbins, units) for the velocity bounds to
            integrate over. 
        center : A sequence of floats, a string, or a tuple.
            The coordinate of the center of the image. If set to 'c', 'center' or
            left blank, the plot is centered on the middle of the domain. If set to
            'max' or 'm', the center will be located at the maximum of the
            ('gas', 'density') field. Centering on the max or min of a specific
            field is supported by providing a tuple such as ("min","temperature") or
            ("max","dark_matter_density"). Units can be specified by passing in *center*
            as a tuple containing a coordinate and string unit name or by passing
            in a YTArray. If a list or unitless array is supplied, code units are
            assumed.
        width : float, tuple, or YTQuantity.
            The width of the projection. A float will assume the width is in code units.
            A (value, unit) tuple or YTQuantity allows for the units of the width to be
            specified. Implies width = height, e.g. the aspect ratio of the PPVCube's 
            spatial dimensions is 1.
        dims : integer, optional
            The spatial resolution of the cube. Implies nx = ny, e.g. the 
            aspect ratio of the PPVCube's spatial dimensions is 1.
        thermal_broad : boolean, optional
            Whether or not to broaden the line using the gas temperature. Default: False.
        atomic_weight : float, optional
            Set this value to the atomic weight of the particle that is emitting the line
            if *thermal_broad* is True. Defaults to 56 (Fe).
        depth : A tuple or a float, optional
            A tuple containing the depth to project through and the string
            key of the unit: (width, 'unit').  If set to a float, code units
            are assumed. Only for off-axis cubes.
        depth_res : integer, optional
            The resolution of integration along the line of sight for off-axis cubes. Default: 256
        method : string, optional
            Set the projection method to be used.
            "integrate" : line of sight integration over the line element.
            "sum" : straight summation over the line of sight.
        weight_field : string, optional
            The name of the weighting field.  Set to None for no weight.
        no_shifting : boolean, optional
            If set, no shifting due to velocity will occur but only thermal broadening.
            Should not be set when *thermal_broad* is False, otherwise nothing happens!
        north_vector : a sequence of floats
            A vector defining the 'up' direction. This option sets the orientation of 
            the plane of projection. If not set, an arbitrary grid-aligned north_vector 
            is chosen. Ignored in the case of on-axis cubes.
        no_ghost: bool, optional
            Optimization option for off-axis cases. If True, homogenized bricks will
            extrapolate out from grid instead of interpolating from
            ghost zones that have to first be calculated.  This can
            lead to large speed improvements, but at a loss of
            accuracy/smoothness in resulting image.  The effects are
            less notable when the transfer function is smooth and
            broad. Default: True

        Examples
        --------
        >>> i = 60*np.pi/180.
        >>> L = [0.0,np.sin(i),np.cos(i)]
        >>> cube = PPVCube(ds, L, "density", (-5.,4.,100,"km/s"), width=(10.,"kpc"))
        """

        self.ds = ds
        self.field = field
        self.width = width
        self.particle_mass = atomic_weight*mh
        self.thermal_broad = thermal_broad
        self.no_shifting = no_shifting

        if not isinstance(normal, string_types):
            width = ds.coordinates.sanitize_width(normal, width, depth)
            width = tuple(el.in_units('code_length').v for el in width)

        if no_shifting and not thermal_broad:
            raise RuntimeError("no_shifting cannot be True when thermal_broad is False!")

        self.center = ds.coordinates.sanitize_center(center, normal)[0]

        self.nx = dims
        self.ny = dims
        self.nv = velocity_bounds[2]

        if method not in ["integrate","sum"]:
            raise RuntimeError("Only the 'integrate' and 'sum' projection +"
                               "methods are supported in PPVCube.")

        dd = ds.all_data()
        fd = dd._determine_fields(field)[0]
        self.field_units = ds._get_field_info(fd).units

        self.vbins = ds.arr(np.linspace(velocity_bounds[0],
                                        velocity_bounds[1],
                                        velocity_bounds[2]+1), velocity_bounds[3])

        self._vbins = self.vbins.copy()
        self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1])
        self.vmid_cgs = self.vmid.in_cgs().v
        self.dv = self.vbins[1]-self.vbins[0]
        self.dv_cgs = self.dv.in_cgs().v

        self.current_v = 0.0

        _vlos = create_vlos(normal, self.no_shifting)
        self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s")

        _intensity = self._create_intensity()
        self.ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)

        if method == "integrate" and weight_field is None:
            self.proj_units = str(ds.quan(1.0, self.field_units+"*cm").units)
        elif method == "sum":
            self.proj_units = self.field_units

        storage = {}
        pbar = get_pbar("Generating cube.", self.nv)
        for sto, i in parallel_objects(range(self.nv), storage=storage):
            self.current_v = self.vmid_cgs[i]
            if isinstance(normal, string_types):
                prj = ds.proj("intensity", ds.coordinates.axis_id[normal], method=method,
                              weight_field=weight_field)
                buf = prj.to_frb(width, self.nx, center=self.center)["intensity"]
            else:
                buf = off_axis_projection(ds, self.center, normal, width,
                                          (self.nx, self.ny, depth_res), "intensity",
                                          north_vector=north_vector, no_ghost=no_ghost,
                                          method=method, weight=weight_field).swapaxes(0,1)
            sto.result_id = i
            sto.result = buf
            pbar.update(i)
        pbar.finish()

        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.proj_units)
        if is_root():
            for i, buf in sorted(storage.items()):
                self.data[:,:,i] = buf.transpose()

        self.axis_type = "velocity"

        # Now fix the width
        if iterable(self.width):
            self.width = ds.quan(self.width[0], self.width[1])
        elif not isinstance(self.width, YTQuantity):
            self.width = ds.quan(self.width, "code_length")

        self.ds.field_info.pop(("gas","intensity"))
        self.ds.field_info.pop(("gas","v_los"))
Example #6
0
def parallel_trees(trees,
                   save_every=None,
                   filename=None,
                   njobs=0,
                   dynamic=False):
    """
    Iterate over a list of trees in parallel.

    Trees are divided up between the available processor groups. Analysis
    field values can then be assigned to halos within the tree. The trees
    will be saved either at the end of the loop or each time the number of
    trees given by the ``save_every`` keyword has been completed.

    This uses the yt
    :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects`
    function, which is parallelized with MPI underneath and so is suitable
    for parallelism across compute nodes.

    Parameters
    ----------
    trees : list of :class:`~ytree.data_structures.tree_node.TreeNode` objects
        The trees to be iterated over in parallel.
    save_every : optional, int or False
        Number of trees to be completed before results are saved. This is
        used to save intermediate results in case scripts need to be restarted.
        If None, save will only occur after iterating over all trees. If False,
        no saving will be done.
        Default: None
    filename : optional, string
        The name of the new arbor to be saved. If None, the naming convention
        will follow the filename keyword of the
        :func:`~ytree.data_structures.arbor.Arbor.save_arbor` function.
        Default: None
    njobs : optional, int
        The number of process groups for parallel iteration. Set to 0 to make
        the same number of process groups as available processors. Hence,
        each tree will be allocated to a single processor. Set to a number
        less than the total number of processors to create groups with multiple
        processors, which will allow for further parallelization within a tree.
        For example, running with 8 processors and setting njobs to 4 will result
        in 4 groups of 2 processors each.
        Default: 0
    dynamic : optional, bool
        Set to False to divide iterations evenly among process groups. Set to
        True to allocate iterations with a task queue. If True, the number of
        processors available will be one fewer than the total as one will act
        as the task queue server.
        Default: False

    Examples
    --------

    >>> import ytree
    >>> a = ytree.load("arbor/arbor.h5")
    >>> a.add_analysis_field("test_field", default=-1, units="Msun")
    >>> trees = list(a[:])
    >>> for tree in ytree.parallel_trees(trees):
    ...     for node in tree["forest"]:
    ...         node["test_field"] = 2 * node["mass"] # some analysis

    See Also
    --------
    parallel_tree_nodes, parallel_nodes

    """

    arbor = trees[0].arbor
    afields = _get_analysis_fields(arbor)

    nt = len(trees)
    save = True
    if save_every is None:
        save_every = nt
    elif save_every is False:
        save_every = nt
        save = False
    nb = int(np.ceil(nt / save_every))

    for ib in range(nb):
        start = ib * save_every
        end = min(start + save_every, nt)

        arbor_storage = {}
        for tree_store, itree in parallel_objects(range(start, end),
                                                  storage=arbor_storage,
                                                  njobs=njobs,
                                                  dynamic=dynamic):

            my_tree = trees[itree]
            yield my_tree

            if is_root():
                my_root = my_tree.find_root()
                tree_store.result_id = (my_root._arbor_index, my_tree.tree_id)

                # If the tree is not a root, only save the "tree" selection
                # as we could overwrite other trees in the forest.
                if my_tree.is_root:
                    selection = "forest"
                else:
                    selection = "tree"

                tree_store.result = {
                    field: my_tree[selection, field]
                    for field in afields
                }

            else:
                tree_store.result_id = None

        # combine results for all trees
        if is_root():
            for itree in range(start, end):
                my_tree = trees[itree]
                my_root = my_tree.find_root()
                key = (my_root._arbor_index, my_tree.tree_id)
                data = arbor_storage[key]

                if my_tree.is_root:
                    indices = slice(None)
                else:
                    indices = [my_tree._tree_field_indices]

                for field in afields:
                    if field not in my_root.field_data:
                        arbor._node_io._initialize_analysis_field(
                            my_root, field)
                    my_root.field_data[field][indices] = data[field]

            if save:
                fn = arbor.save_arbor(filename=filename, trees=trees)
                arbor = ytree_load(fn)
                trees = [
                    regenerate_node(arbor, tree, new_index=i)
                    for i, tree in enumerate(trees)
                ]
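To make the ``save_every`` batching above concrete, here is a short sketch (with hypothetical counts) of how the loop splits the trees into blocks before each save:

import numpy as np

# Hypothetical counts: 10 trees, saving every 4 trees -> 3 batches.
nt, save_every = 10, 4
nb = int(np.ceil(nt / save_every))
batches = [(ib * save_every, min((ib + 1) * save_every, nt)) for ib in range(nb)]
# batches == [(0, 4), (4, 8), (8, 10)]; results are saved after each batch.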
Example #7
0
def parallel_tree_nodes(tree, group="forest", njobs=0, dynamic=False):
    """
    Iterate over nodes in a single tree in parallel.

    Nodes are divided up between the available processor groups. Analysis
    field values can then be assigned to each node (halo).

    Note that, unlike the parallel_trees and parallel_nodes functions, no saving
    is performed internally. Results must be saved manually with
    :func:`~ytree.data_structures.arbor.Arbor.save_arbor`.

    This uses the yt
    :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects`
    function, which is parallelized with MPI underneath and so is suitable
    for parallelism across compute nodes.

    Parameters
    ----------
    tree : :class:`~ytree.data_structures.tree_node.TreeNode`
        The tree whose nodes will be iterated over.
    group : optional, str ("forest", "tree", or "prog")
        Determines the nodes to be iterated over in the tree: "forest" for
        all nodes in the forest, "tree" for all nodes in the tree, or "prog"
        for all nodes in the line of main progenitors.
        Default: "forest"
    njobs : optional, int
        The number of process groups for parallel iteration. Set to 0 to make
        the same number of process groups as available processors. Hence,
        each node will be allocated to a single processor. Set to a number
        less than the total number of processors to create groups with multiple
        processors, which will allow for further parallelization. For example,
        running with 8 processors and setting njobs to 4 will result in 4
        groups of 2 processors each.
        Default: 0
    dynamic : optional, bool
        Set to False to divide iterations evenly among process groups. Set to
        True to allocate iterations with a task queue. If True, the number of
        processors available will be one fewer than the total as one will act
        as the task queue server.
        Default: False

    Examples
    --------

    >>> import ytree
    >>> a = ytree.load("arbor/arbor.h5")
    >>> a.add_analysis_field("test_field", default=-1, units="Msun")
    >>> trees = list(a[:])
    >>> for tree in trees:
    ...     for node in ytree.parallel_tree_nodes(tree):
    ...         node["test_field"] = 2 * node["mass"] # some analysis

    See Also
    --------
    parallel_trees, parallel_nodes

    """

    afields = _get_analysis_fields(tree.arbor)

    my_halos = list(tree[group])

    tree_storage = {}
    for halo_store, ihalo in parallel_objects(range(len(my_halos)),
                                              storage=tree_storage,
                                              njobs=njobs,
                                              dynamic=dynamic):

        my_halo = my_halos[ihalo]
        yield my_halo
        if is_root():
            halo_store.result_id = my_halo.tree_id
            halo_store.result = {field: my_halo[field] for field in afields}
        else:
            halo_store.result_id = -1

    # combine results for this tree
    if is_root():
        for tree_id, result in sorted(tree_storage.items()):
            if tree_id == -1:
                continue
            my_halo = tree.get_node("forest", tree_id)

            for field, value in result.items():
                my_halo[field] = value
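A usage sketch grounded in the docstring above: iterate over the main-progenitor line only and save by hand, since parallel_tree_nodes performs no saving itself. The yt.is_root() guard is an assumption here, mirroring the root-only saving pattern used in this module.

import yt     # assumed available for yt.is_root()
import ytree

a = ytree.load("arbor/arbor.h5")
a.add_analysis_field("test_field", default=-1, units="Msun")
trees = list(a[:])
tree = trees[0]
for node in ytree.parallel_tree_nodes(tree, group="prog"):
    node["test_field"] = 2 * node["mass"]   # some analysis
if yt.is_root():
    a.save_arbor(trees=trees)   # saving must be done manually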