Example #1
 def __init__(
     self, start_point, end_point, ds=None, field_parameters=None, data_source=None
 ):
     validate_3d_array(start_point)
     validate_3d_array(end_point)
     validate_object(ds, Dataset)
     validate_object(field_parameters, dict)
     validate_object(data_source, YTSelectionContainer)
     super(YTRay, self).__init__(ds, field_parameters, data_source)
     if isinstance(start_point, YTArray):
         self.start_point = self.ds.arr(start_point).to("code_length")
     else:
         self.start_point = self.ds.arr(start_point, "code_length", dtype="float64")
     if isinstance(end_point, YTArray):
         self.end_point = self.ds.arr(end_point).to("code_length")
     else:
         self.end_point = self.ds.arr(end_point, "code_length", dtype="float64")
     if (self.start_point < self.ds.domain_left_edge).any() or (
         self.end_point > self.ds.domain_right_edge
     ).any():
         mylog.warn(
             "Ray start or end is outside the domain. "
             + "Returned data will only be for the ray section inside the domain."
         )
     self.vec = self.end_point - self.start_point
     self._set_center(self.start_point)
     self.set_field_parameter("center", self.start_point)
     self._dts, self._ts = None, None
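A hedged usage sketch (not part of the snippet above): in yt, a ray is normally created through the dataset's ray() convenience method, which forwards to this constructor; the dataset name is the yt sample dataset used elsewhere in these examples.

# Usage sketch, assuming a standard yt install and sample data.
import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
ray = ds.ray([0.1, 0.1, 0.1], [0.9, 0.9, 0.9])  # start/end points in code_length
print(ray["t"], ray["dts"])  # parametric position and path-length fraction per cell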
Example #2
    def __init__(
        self,
        base_region,
        domain,
        ds,
        over_refine_factor=1,
        num_ghost_zones=0,
        base_grid=None,
    ):
        super(RAMSESDomainSubset, self).__init__(
            base_region, domain, ds, over_refine_factor, num_ghost_zones
        )

        self._base_grid = base_grid

        if num_ghost_zones > 0:
            if not all(ds.periodicity):
                mylog.warn("Ghost zones will wrongly assume the domain to be periodic.")
            # Create a base domain *with no ghost zones*
            base_domain = RAMSESDomainSubset(
                ds.all_data(), domain, ds, over_refine_factor
            )
            self._base_domain = base_domain
        elif num_ghost_zones < 0:
            raise RuntimeError(
                "Cannot initialize a domain subset with a negative number of ghost zones,"
                " was called with num_ghost_zones=%s" % num_ghost_zones
            )
Example #3
    def set_units(self):
        super().set_units()
        res = self.parameters["res"]
        for i, ax in enumerate("xy"):
            self.unit_registry.add(f"{ax}pixels", res[i], dimensions.length)

        if res[0] == res[1]:
            self.unit_registry.add("pixels", res[0], dimensions.length)
        else:
            mylog.warn("x and y pixels have different sizes.")
Example #4
 def add_default_quantities(self, field_type='halos'):
     for field in ["particle_identifier", "particle_mass",
                   "particle_position_x", "particle_position_y",
                   "particle_position_z", "virial_radius"]:
         field_name = (field_type, field)
         if field_name not in self.halos_ds.field_list:
             mylog.warn("Halo dataset %s has no field %s." %
                        (self.halos_ds, str(field_name)))
             continue
         self.add_quantity(field, field_type=field_type, prepend=True)
Example #5
    def save_spectrum(self, filename='spectrum.h5', format=None):
        """
        Save the current spectrum data to an output file.  Unless specified,
        the output data format will be determined by the suffix of the filename
        provided ("h5":HDF5, "fits":FITS, all other:ASCII).

        ASCII data is stored as a tab-delimited text file.

        **Parameters**

        :filename: string, optional

            Output filename for storing the data.
            Default: 'spectrum.h5'

        :format: string, optional

            Data format of the output file.  Valid examples are "HDF5",
            "FITS", and "ASCII".  If None is set, selects based on suffix
            of filename.
            Default: None

        **Example**

        Save a spectrum to disk, load it from disk, and plot it.

        >>> import trident
        >>> ray = trident.make_onezone_ray()
        >>> sg = trident.SpectrumGenerator('COS')
        >>> sg.make_spectrum(ray)
        >>> sg.save_spectrum('temp.h5')
        >>> sg.clear_spectrum()
        >>> sg.load_spectrum('temp.h5')
        >>> sg.plot_spectrum('temp.png')
        """
        if format is None:
            if filename.endswith(('.h5', '.hdf5')):
                self._write_spectrum_hdf5(filename)
            elif filename.endswith(('.fits', '.FITS')):
                self._write_spectrum_fits(filename)
            else:
                self._write_spectrum_ascii(filename)
        elif format == 'HDF5':
            self._write_spectrum_hdf5(filename)
        elif format == 'FITS':
            self._write_spectrum_fits(filename)
        elif format == 'ASCII':
            self._write_spectrum_ascii(filename)
        else:
            mylog.warn(
                "Invalid format.  Must be 'HDF5', 'FITS', 'ASCII'. Defaulting to ASCII."
            )
            self._write_spectrum_ascii(filename)
Example #6
    def get_bbox(self, left, right):
        """
        Given left and right indices, return a mask and
        set of offsets+lengths into the sdf data.
        """
        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
        # the comparison must sit inside np.any; the original compared the
        # boolean np.any(iright - ileft) against domain_dims
        if np.any(iright - ileft > self.domain_dims):
            mylog.warn("Attempting to get data from bounding box larger than the domain. You may want to check your units.")
        #iright[iright <= ileft+1] += 1

        return self.get_ibbox(ileft, iright)
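A standalone check of the index arithmetic above, with toy stand-ins for the instance attributes:

# Toy check of the floor-index math in get_bbox (all values hypothetical).
import numpy as np

rmin = np.zeros(3)
domain_width = np.ones(3)
domain_dims = np.array([64, 64, 64])

left = np.array([0.25, 0.25, 0.25])
right = np.array([0.75, 0.75, 0.75])
ileft = np.floor((left - rmin) / domain_width * domain_dims)    # [16. 16. 16.]
iright = np.floor((right - rmin) / domain_width * domain_dims)  # [48. 48. 48.]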
Example #7
    def read_namelist(self):
        """Read the namelist.txt file in the output folder, if present"""
        namelist_file = os.path.join(self.root_folder, 'namelist.txt')
        if os.path.exists(namelist_file):
            try:
                with open(namelist_file, 'r') as f:
                    nml = f90nml.read(f)
            except ImportError as e:
                nml = "An error occurred when reading the namelist: %s" % str(e)
            except ValueError as e:
                mylog.warn("Could not parse `namelist.txt` file as it was malformed: %s" % str(e))
                return

            self.parameters['namelist'] = nml
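For context, a minimal sketch of the f90nml call used above (assumes the f90nml package is installed; the file name is hypothetical):

# f90nml.read accepts a path or an open file and returns a Namelist,
# which behaves like a nested dict of namelist groups.
import f90nml

nml = f90nml.read("namelist.txt")
print(nml)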
Example #8
    def get_bbox(self, left, right):
        """
        Given left and right indices, return a mask and
        set of offsets+lengths into the sdf data.
        """
        ileft = np.floor(
            (left - self.rmin) / self.domain_width * self.domain_dims)
        iright = np.floor(
            (right - self.rmin) / self.domain_width * self.domain_dims)
        if np.any(iright - ileft > self.domain_dims):
            mylog.warn(
                "Attempting to get data from bounding box larger than the domain. You may want to check your units."
            )
        #iright[iright <= ileft+1] += 1

        return self.get_ibbox(ileft, iright)
Example #9
def subfind_field_list(fh, ptype, pcount):
    fields = []
    offset_fields = []
    for field in fh.keys():
        if "PartType" in field:
            # These are halo member particles
            continue
        elif isinstance(fh[field], h5py.Group):
            my_fields, my_offset_fields = \
              subfind_field_list(fh[field], ptype, pcount)
            fields.extend(my_fields)
            # accumulate the recursion's offset fields into the outer list;
            # the original extended the local my_offset_fields and lost them
            offset_fields.extend(my_offset_fields)
        else:
            if not fh[field].size % pcount[ptype]:
                my_div = fh[field].size / pcount[ptype]
                fname = fh[field].name[fh[field].name.find(ptype) +
                                       len(ptype) + 1:]
                if my_div > 1:
                    for i in range(int(my_div)):
                        fields.append((ptype, "%s_%d" % (fname, i)))
                else:
                    fields.append((ptype, fname))
            elif ptype == "SUBFIND" and \
              not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
                # These are actually FOF fields, but they were written after
                # a load balancing step moved halos around and thus they do not
                # correspond to the halos stored in the FOF group.
                my_div = fh[field].size / fh["/SUBFIND"].attrs[
                    "Number_of_groups"]
                fname = fh[field].name[fh[field].name.find(ptype) +
                                       len(ptype) + 1:]
                if my_div > 1:
                    for i in range(int(my_div)):
                        fields.append(("FOF", "%s_%d" % (fname, i)))
                else:
                    fields.append(("FOF", fname))
                offset_fields.append(fname)
            else:
                mylog.warn("Cannot add field (%s, %s) with size %d." % \
                           (ptype, fh[field].name, fh[field].size))
                continue
    return fields, offset_fields
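A worked illustration of the field-splitting rule above: when a dataset's size is an exact multiple of the per-type particle count, it becomes that many numbered scalar fields (the values and field name below are hypothetical):

pcount = {"FOF": 100}
size = 300                     # fh[field].size for, e.g., a 3-vector per halo
my_div = size / pcount["FOF"]  # -> 3.0
fields = [("FOF", "CenterOfMass_%d" % i) for i in range(int(my_div))]
# -> [('FOF', 'CenterOfMass_0'), ('FOF', 'CenterOfMass_1'), ('FOF', 'CenterOfMass_2')]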
Example #10
 def __init__(self, function=None, width=None, filename=None):
     self.kernel = []
     self.filename = filename
     self.function = function
     self.width = width
     # if filename is defined, use it
     if filename is not None:
         # Check to see if the file is in the local dir
         if os.path.isfile(filename):
             lsf_file = open(filename, 'r')
         # otherwise use the file in the lsf_kernels dir
         else:
             filename2 = os.path.join(trident_path(), "data", \
                                      "lsf_kernels", filename)
             if os.path.isfile(filename2):
                 lsf_file = open(filename2, 'r')
             else:
                 raise RuntimeError(
                     "LSF filename not found in current " +
                     "directory or in %s/data/lsf_kernels directory" %
                     trident_path())
         for line in lsf_file:
             self.kernel.append(float(line.split()[1]))
         lsf_file.close()
         self.kernel = np.array(self.kernel)
         self.width = self.kernel.size
     elif function is not None and width is not None:
         if function == 'boxcar':
             if width % 2 == 0:
                 mylog.warn(
                     "LSF kernel must have an odd length. Reducing kernel size by 1."
                 )
                 width -= 1
             self.kernel = np.ones(width) / width
         elif function == 'gaussian':
             from astropy.convolution import Gaussian1DKernel
             self.kernel = Gaussian1DKernel(width)
     else:
         raise RuntimeError(
             "Either LSF filename OR function+width must be specified.")
Example #11
def subfind_field_list(fh, ptype, pcount):
    fields = []
    offset_fields = []
    for field in fh.keys():
        if "PartType" in field:
            # These are halo member particles
            continue
        elif isinstance(fh[field], h5py.Group):
            my_fields, my_offset_fields = \
              subfind_field_list(fh[field], ptype, pcount)
            fields.extend(my_fields)
            offset_fields.extend(my_offset_fields)
        else:
            if not fh[field].size % pcount[ptype]:
                my_div = fh[field].size / pcount[ptype]
                fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
                if my_div > 1:
                    for i in range(int(my_div)):
                        fields.append((ptype, "%s_%d" % (fname, i)))
                else:
                    fields.append((ptype, fname))
            elif ptype == "SUBFIND" and \
              not fh[field].size % fh["/SUBFIND"].attrs["Number_of_groups"]:
                # These are actually FOF fields, but they were written after 
                # a load balancing step moved halos around and thus they do not
                # correspond to the halos stored in the FOF group.
                my_div = fh[field].size / fh["/SUBFIND"].attrs["Number_of_groups"]
                fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1:]
                if my_div > 1:
                    for i in range(int(my_div)):
                        fields.append(("FOF", "%s_%d" % (fname, i)))
                else:
                    fields.append(("FOF", fname))
                offset_fields.append(fname)
            else:
                mylog.warn("Cannot add field (%s, %s) with size %d." % \
                           (ptype, fh[field].name, fh[field].size))
                continue
    return fields, offset_fields
Example #12
    def _parse_parameter_file(self):
        # hard-coded -- not provided by headers
        self.dimensionality = 3
        self.refine_by = 2
        self.parameters["HydroMethod"] = 'artio'
        self.parameters["Time"] = 1.  # default unit is 1...

        # read header
        self.unique_identifier = \
            int(os.stat(self.parameter_filename)[stat.ST_CTIME])

        self.num_grid = self._handle.num_grid
        self.domain_dimensions = np.ones(3, dtype='int32') * self.num_grid
        self.domain_left_edge = np.zeros(3, dtype="float64")
        self.domain_right_edge = np.ones(3, dtype='float64') * self.num_grid

        # TODO: detect if grid exists
        self.min_level = 0  # ART has min_level=0
        self.max_level = self.artio_parameters["grid_max_level"][0]

        # TODO: detect if particles exist
        if self._handle.has_particles:
            self.num_species = self.artio_parameters["num_particle_species"][0]
            self.particle_variables = [["PID", "SPECIES"]
                                       for i in range(self.num_species)]
            self.particle_types_raw = \
                self.artio_parameters["particle_species_labels"]
            self.particle_types = tuple(self.particle_types_raw)

            for species in range(self.num_species):
                # Mass would be best as a derived field,
                # but wouldn't detect under 'all'
                if self.artio_parameters["particle_species_labels"][species]\
                        == "N-BODY":
                    self.particle_variables[species].append("MASS")

                if self.artio_parameters["num_primary_variables"][species] > 0:
                    self.particle_variables[species].extend(
                        self.artio_parameters[
                            "species_%02d_primary_variable_labels" %
                            (species, )])
                if self.artio_parameters["num_secondary_variables"][
                        species] > 0:
                    self.particle_variables[species].extend(
                        self.artio_parameters[
                            "species_%02d_secondary_variable_labels" %
                            (species, )])

        else:
            self.num_species = 0
            self.particle_variables = []
            self.particle_types = ()
        self.particle_types_raw = self.particle_types

        self.current_time = self.quan(
            self._handle.tphys_from_tcode(self.artio_parameters["tl"][0]),
            "yr")

        # detect cosmology
        if "abox" in self.artio_parameters:
            self.cosmological_simulation = True

            abox = self.artio_parameters["abox"][0]
            self.omega_lambda = self.artio_parameters["OmegaL"][0]
            self.omega_matter = self.artio_parameters["OmegaM"][0]
            self.hubble_constant = self.artio_parameters["hubble"][0]
            self.current_redshift = 1.0 / self.artio_parameters["auni"][0] - 1.0
            self.current_redshift_box = 1.0 / abox - 1.0

            self.parameters["initial_redshift"] =\
                1.0 / self.artio_parameters["auni_init"][0] - 1.0
            self.parameters["CosmologyInitialRedshift"] =\
                self.parameters["initial_redshift"]

            self.parameters['unit_m'] = self.artio_parameters["mass_unit"][0]
            self.parameters['unit_t'] =\
                self.artio_parameters["time_unit"][0] * abox**2
            self.parameters['unit_l'] =\
                self.artio_parameters["length_unit"][0] * abox

            if self.artio_parameters["DeltaDC"][0] != 0:
                mylog.warn(
                    "DeltaDC != 0, which implies auni != abox.  Be sure you understand which expansion parameter is appropriate for your use! (Gnedin, Kravtsov, & Rudd 2011)"
                )
        else:
            self.cosmological_simulation = False

            self.parameters['unit_l'] = self.artio_parameters["length_unit"][0]
            self.parameters['unit_t'] = self.artio_parameters["time_unit"][0]
            self.parameters['unit_m'] = self.artio_parameters["mass_unit"][0]

        # hard coded assumption of 3D periodicity
        self.periodicity = (True, True, True)
Example #13
    def make_light_ray(self, seed=None,
                       start_position=None, end_position=None,
                       trajectory=None,
                       fields=None, setup_function=None,
                       solution_filename=None, data_filename=None,
                       get_los_velocity=True, redshift=None,
                       njobs=-1):
        """
        make_light_ray(seed=None, start_position=None, end_position=None,
                       trajectory=None, fields=None, setup_function=None,
                       solution_filename=None, data_filename=None,
                       get_los_velocity=True, redshift=None,
                       njobs=-1)

        Create a light ray and get field values for each lixel.  A light
        ray consists of a list of field values for cells intersected by
        the ray and the path length of the ray through those cells.
        Light ray data can be written out to an hdf5 file.

        Parameters
        ----------
        seed : optional, int
            Seed for the random number generator.
            Default: None.
        start_position : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The coordinates of the starting position of the ray.
            Default: None.
        end_position : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The coordinates of the ending position of the ray.
            Default: None.
        trajectory : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The (r, theta, phi) direction of the light ray.  Use either 
            end_position or trajectory, not both.
            Default: None.
        fields : optional, list
            A list of fields for which to get data.
            Default: None.
        setup_function : optional, callable, accepts a ds
            This function will be called on each dataset that is loaded 
            to create the light ray.  For, example, this can be used to 
            add new derived fields.
            Default: None.
        solution_filename : optional, string
            Path to a text file where the trajectory of each
            subray is written out.
            Default: None.
        data_filename : optional, string
            Path to output file for ray data.
            Default: None.
        get_los_velocity : optional, bool
            If True, the line of sight velocity is calculated for
            each point in the ray.
            Default: True.
        redshift : optional, float
            Used with light rays made from single datasets to specify a 
            starting redshift for the ray.  If not used, the starting 
            redshift will be 0 for a non-cosmological dataset and 
            the dataset redshift for a cosmological dataset.
            Default: None.
        njobs : optional, int
            The number of parallel jobs over which the segments will 
            be split.  Choose -1 for one processor per segment.
            Default: -1.

        Examples
        --------

        Make a light ray from multiple datasets:
        
        >>> import yt
        >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
        ...     LightRay
        >>> my_ray = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
        ...                   0., 0.1, time_data=False)
        ...
        >>> my_ray.make_light_ray(seed=12345,
        ...                       solution_filename="solution.txt",
        ...                       data_filename="my_ray.h5",
        ...                       fields=["temperature", "density"],
        ...                       get_los_velocity=True)

        Make a light ray from a single dataset:

        >>> import yt
        >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
        ...     LightRay
        >>> my_ray = LightRay("IsolatedGalaxy/galaxy0030/galaxy0030")
        ...
        >>> my_ray.make_light_ray(start_position=[0., 0., 0.],
        ...                       end_position=[1., 1., 1.],
        ...                       solution_filename="solution.txt",
        ...                       data_filename="my_ray.h5",
        ...                       fields=["temperature", "density"],
        ...                       get_los_velocity=True)
        
        """

        # Calculate solution.
        self._calculate_light_ray_solution(seed=seed, 
                                           start_position=start_position, 
                                           end_position=end_position,
                                           trajectory=trajectory,
                                           filename=solution_filename)

        # Initialize data structures.
        self._data = {}
        if fields is None: fields = []
        data_fields = fields[:]
        all_fields = fields[:]
        all_fields.extend(['dl', 'dredshift', 'redshift'])
        if get_los_velocity:
            all_fields.extend(['velocity_x', 'velocity_y',
                               'velocity_z', 'velocity_los'])
            data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])

        all_ray_storage = {}
        for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                       storage=all_ray_storage,
                                                       njobs=njobs):

            # Load dataset for segment.
            ds = load(my_segment['filename'], **self.load_kwargs)

            my_segment['unique_identifier'] = ds.unique_identifier
            if redshift is not None:
                if ds.cosmological_simulation and redshift != ds.current_redshift:
                    mylog.warn("Generating light ray with different redshift than " +
                               "the dataset itself.")
                my_segment["redshift"] = redshift

            if setup_function is not None:
                setup_function(ds)

            if start_position is not None:
                my_segment["start"] = ds.arr(my_segment["start"], "code_length")
                my_segment["end"] = ds.arr(my_segment["end"], "code_length")
            else:
                my_segment["start"] = ds.domain_width * my_segment["start"] + \
                  ds.domain_left_edge
                my_segment["end"] = ds.domain_width * my_segment["end"] + \
                  ds.domain_left_edge

            if not ds.cosmological_simulation:
                next_redshift = my_segment["redshift"]
            elif self.near_redshift == self.far_redshift:
                next_redshift = my_segment["redshift"] - \
                  self._deltaz_forward(my_segment["redshift"], 
                                       ds.domain_width[0].in_units("Mpccm / h") *
                                       my_segment["traversal_box_fraction"])
            elif my_segment.get("next", None) is None:
                next_redshift = self.near_redshift
            else:
                next_redshift = my_segment['next']['redshift']

            mylog.info("Getting segment at z = %s: %s to %s." %
                       (my_segment['redshift'], my_segment['start'],
                        my_segment['end']))

            # Break periodic ray into non-periodic segments.
            sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
                                        left=ds.domain_left_edge,
                                        right=ds.domain_right_edge)

            # Prepare data structure for subsegment.
            sub_data = {}
            sub_data['segment_redshift'] = my_segment['redshift']
            for field in all_fields:
                sub_data[field] = []

            # Get data for all subsegments in segment.
            for sub_segment in sub_segments:
                mylog.info("Getting subsegment: %s to %s." %
                           (list(sub_segment[0]), list(sub_segment[1])))
                sub_ray = ds.ray(sub_segment[0], sub_segment[1])
                asort = np.argsort(sub_ray["t"])
                sub_data['dl'].extend(sub_ray['dts'][asort] *
                                      vector_length(sub_ray.start_point,
                                                    sub_ray.end_point))
                for field in data_fields:
                    sub_data[field].extend(sub_ray[field][asort])

                if get_los_velocity:
                    line_of_sight = sub_segment[1] - sub_segment[0]
                    line_of_sight /= ((line_of_sight**2).sum())**0.5
                    sub_vel = ds.arr([sub_ray['velocity_x'],
                                      sub_ray['velocity_y'],
                                      sub_ray['velocity_z']])
                    sub_data['velocity_los'].extend((np.rollaxis(sub_vel, 1) *
                                                     line_of_sight).sum(axis=1)[asort])
                    del sub_vel

                sub_ray.clear_data()
                del sub_ray, asort

            for key in sub_data:
                sub_data[key] = ds.arr(sub_data[key]).in_cgs()

            # Get redshift for each lixel.  Assume linear relation between l and z.
            sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
                (sub_data['dl'] / vector_length(my_segment['start'], 
                                                my_segment['end']).in_cgs())
            sub_data['redshift'] = my_segment['redshift'] - \
              sub_data['dredshift'].cumsum() + sub_data['dredshift']

            # Remove empty lixels.
            sub_dl_nonzero = sub_data['dl'].nonzero()
            for field in all_fields:
                sub_data[field] = sub_data[field][sub_dl_nonzero]
            del sub_dl_nonzero

            # Add to storage.
            my_storage.result = sub_data

            del ds

        # Reconstruct ray data from parallel_objects storage.
        all_data = [my_data for my_data in all_ray_storage.values()]
        # This is now a list of segments where each one is a dictionary
        # with all the fields.
        all_data.sort(key=lambda a:a['segment_redshift'], reverse=True)
        # Flatten the list into a single dictionary containing fields
        # for the whole ray.
        all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])

        if data_filename is not None:
            self._write_light_ray(data_filename, all_data)

        self._data = all_data
        return all_data
Example #14
    def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity):
        """
        Add the absorption lines to the spectrum.
        """
        # Only make voigt profile for slice of spectrum that is 10 times the line width.
        spectrum_bin_ratio = 5
        # Widen wavelength window until optical depth reaches a max value at the ends.
        max_tau = 0.001

        for line in self.line_list:
            column_density = field_data[line['field_name']] * field_data['dl']
            delta_lambda = line['wavelength'] * field_data['redshift']
            if use_peculiar_velocity:
                # include factor of (1 + z) because our velocity is in proper frame.
                delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                    field_data['velocity_los'] / speed_of_light_cgs
            thermal_b = np.sqrt(
                (2 * boltzmann_constant_cgs * field_data['temperature']) /
                line['atomic_mass'])
            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                      self.lambda_bins)

            # ratio of line width to bin width
            width_ratio = ((line['wavelength'] + delta_lambda) * \
                           thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d

            if (width_ratio < 1.0).any():
                mylog.warn(("%d out of %d line components are unresolved, " +
                            "consider increasing spectral resolution.") %
                           ((width_ratio < 1.0).sum(), width_ratio.size))

            # do voigt profiles for a subset of the full spectrum
            left_index = (center_bins -
                          spectrum_bin_ratio * width_ratio).astype(int).clip(
                              0, self.n_lambda)
            right_index = (center_bins +
                           spectrum_bin_ratio * width_ratio).astype(int).clip(
                               0, self.n_lambda)

            # loop over all lines wider than the bin width
            valid_lines = np.where((width_ratio >= 1.0)
                                   & (right_index - left_index > 1))[0]
            pbar = get_pbar(
                "Adding line - %s [%f A]: " %
                (line['label'], line['wavelength']), valid_lines.size)
            for i, lixel in enumerate(valid_lines):
                my_bin_ratio = spectrum_bin_ratio
                while True:
                    lambda_bins, line_tau = \
                        tau_profile(
                            line['wavelength'], line['f_value'],
                            line['gamma'], thermal_b[lixel].in_units("km/s"),
                            column_density[lixel],
                            delta_lambda=delta_lambda[lixel],
                            lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])

                    # Widen wavelength window until optical depth reaches a max value at the ends.
                    if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                      (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
                        break
                    my_bin_ratio *= 2
                    left_index[lixel] = (
                        center_bins[lixel] -
                        my_bin_ratio * width_ratio[lixel]).astype(int).clip(
                            0, self.n_lambda)
                    right_index[lixel] = (
                        center_bins[lixel] +
                        my_bin_ratio * width_ratio[lixel]).astype(int).clip(
                            0, self.n_lambda)
                self.tau_field[
                    left_index[lixel]:right_index[lixel]] += line_tau
                if line['label_threshold'] is not None and \
                        column_density[lixel] >= line['label_threshold']:
                    if use_peculiar_velocity:
                        peculiar_velocity = field_data['velocity_los'][
                            lixel].in_units("km/s")
                    else:
                        peculiar_velocity = 0.0
                    self.spectrum_line_list.append({
                        'label':
                        line['label'],
                        'wavelength':
                        (line['wavelength'] + delta_lambda[lixel]),
                        'column_density':
                        column_density[lixel],
                        'b_thermal':
                        thermal_b[lixel],
                        'redshift':
                        field_data['redshift'][lixel],
                        'v_pec':
                        peculiar_velocity
                    })
                pbar.update(i)
            pbar.finish()

            del column_density, delta_lambda, thermal_b, \
                center_bins, width_ratio, left_index, right_index
Example #15
    def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity):
        """
        Add the absorption lines to the spectrum.
        """
        # Only make voigt profile for slice of spectrum that is 10 times the line width.
        spectrum_bin_ratio = 5
        # Widen wavelength window until optical depth reaches a max value at the ends.
        max_tau = 0.001

        for line in self.line_list:
            column_density = field_data[line['field_name']] * field_data['dl']
            delta_lambda = line['wavelength'] * field_data['redshift']
            if use_peculiar_velocity:
                # include factor of (1 + z) because our velocity is in proper frame.
                delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                    field_data['velocity_los'] / speed_of_light_cgs
            thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
                                  field_data['temperature']) /
                                  line['atomic_mass'])
            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                      self.lambda_bins)

            # ratio of line width to bin width
            width_ratio = ((line['wavelength'] + delta_lambda) * \
                           thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d

            if (width_ratio < 1.0).any():
                mylog.warn(("%d out of %d line components are unresolved, " +
                            "consider increasing spectral resolution.") %
                           ((width_ratio < 1.0).sum(), width_ratio.size))

            # do voigt profiles for a subset of the full spectrum
            left_index  = (center_bins -
                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
            right_index = (center_bins +
                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)

            # loop over all lines wider than the bin width
            valid_lines = np.where((width_ratio >= 1.0) &
                                   (right_index - left_index > 1))[0]
            pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                            valid_lines.size)
            for i, lixel in enumerate(valid_lines):
                my_bin_ratio = spectrum_bin_ratio
                while True:
                    lambda_bins, line_tau = \
                        tau_profile(
                            line['wavelength'], line['f_value'],
                            line['gamma'], thermal_b[lixel].in_units("km/s"),
                            column_density[lixel],
                            delta_lambda=delta_lambda[lixel],
                            lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
                        
                    # Widen wavelength window until optical depth reaches a max value at the ends.
                    if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                      (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
                        break
                    my_bin_ratio *= 2
                    left_index[lixel]  = (center_bins[lixel] -
                                          my_bin_ratio *
                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
                    right_index[lixel] = (center_bins[lixel] +
                                          my_bin_ratio *
                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
                if line['label_threshold'] is not None and \
                        column_density[lixel] >= line['label_threshold']:
                    if use_peculiar_velocity:
                        peculiar_velocity = field_data['velocity_los'][lixel].in_units("km/s")
                    else:
                        peculiar_velocity = 0.0
                    self.spectrum_line_list.append({'label': line['label'],
                                                    'wavelength': (line['wavelength'] +
                                                                   delta_lambda[lixel]),
                                                    'column_density': column_density[lixel],
                                                    'b_thermal': thermal_b[lixel],
                                                    'redshift': field_data['redshift'][lixel],
                                                    'v_pec': peculiar_velocity})
                pbar.update(i)
            pbar.finish()

            del column_density, delta_lambda, thermal_b, \
                center_bins, width_ratio, left_index, right_index
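The thermal Doppler parameter b = sqrt(2 * k_B * T / m) used by both versions above can be checked standalone (constants via astropy; the temperature and mass are example values):

# Thermal b parameter for atomic hydrogen at 10^4 K; expect roughly 12.8 km/s.
import numpy as np
from astropy import constants as const
from astropy import units as u

T = 1.0e4 * u.K
m = 1.008 * u.u  # hydrogen atomic mass
b = np.sqrt(2 * const.k_B * T / m).to("km/s")
print(b)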
Example #16
from yt.extern import \
    six
from yt.funcs import \
    is_root, mylog
from yt.utilities.parallel_tools.parallel_analysis_interface import \
    ParallelAnalysisInterface, \
    ProcessorPool
from yt.utilities.exceptions import \
    YTRockstarMultiMassNotSupported

try:
    from yt_astro_analysis.halo_finding.rockstar import \
     rockstar_interface
except ImportError:
    mylog.warn(
        ("Cannot import the rockstar interface.  Rockstar will not run.\n" +
         "If you need Rockstar, see the installation instructions at " +
         "http://yt-astro-analysis.readthedocs.io/."))
    rockstar_interface = None

import socket
import time
import os
import numpy as np
from os import path

from yt.config import ytcfg  # needed by InlineRunner below


class InlineRunner(ParallelAnalysisInterface):
    def __init__(self):
        # If this is being run inline, num_readers == comm.size, always.
        psize = ytcfg.getint("yt", "__global_parallel_size")
        self.num_readers = psize
Example #17
    def _add_lines_to_spectrum(self,
                               field_data,
                               use_peculiar_velocity,
                               output_absorbers_file,
                               subgrid_resolution=10,
                               observing_redshift=0.,
                               njobs=-1):
        """
        Add the absorption lines to the spectrum.
        """

        # Change the redshifts of individual absorbers to account for the
        # redshift at which the observer sits
        redshift, redshift_eff = self._apply_observing_redshift(
            field_data, use_peculiar_velocity, observing_redshift)

        # Widen wavelength window until optical depth falls below this tau
        # value at the ends to assure that the wings of a line have been
        # fully resolved.
        min_tau = 1e-3

        # step through each ionic transition (e.g. HI, HII, MgII) specified
        # and deposit the lines into the spectrum
        for line in parallel_objects(self.line_list, njobs=njobs):
            column_density = field_data[line['field_name']] * field_data['dl']
            if (column_density < 0).any():
                mylog.warn(
                    "Setting negative densities for field %s to 0! Bad!" %
                    line['field_name'])
                np.clip(column_density, 0, np.inf, out=column_density)
            if (column_density == 0).all():
                mylog.info("Not adding line %s: insufficient column density" %
                           line['label'])
                continue

            # redshift_eff field combines cosmological and velocity redshifts
            # so delta_lambda gives the offset in angstroms from the rest frame
            # wavelength to the observed wavelength of the transition
            if use_peculiar_velocity:
                delta_lambda = line['wavelength'] * redshift_eff
            else:
                delta_lambda = line['wavelength'] * redshift
            # lambda_obs is central wavelength of line after redshift
            lambda_obs = line['wavelength'] + delta_lambda
            # the total number of absorbers per transition
            n_absorbers = len(lambda_obs)

            # we want to know the bin index in the lambda_field array
            # where each line has its central wavelength after being
            # redshifted.  however, because we don't know a priori how wide
            # a line will be (ie DLAs), we have to include bin indices
            # *outside* the spectral range of the AbsorptionSpectrum
            # object.  Thus, we find the "equivalent" bin index, which
            # may be <0 or >the size of the array.  In the end, we deposit
            # the bins that actually overlap with the AbsorptionSpectrum's
            # range in lambda.

            # this equation gives us the "equivalent" bin index for each line
            # if it were placed into the self.lambda_field array
            center_index = (lambda_obs.in_units('Angstrom').d - self.lambda_min) \
                            / self.bin_width.d
            center_index = np.ceil(center_index).astype('int')

            # thermal broadening b parameter
            thermal_b = np.sqrt(
                (2 * boltzmann_constant_cgs * field_data['temperature']) /
                line['atomic_mass'])

            # the actual thermal width of the lines
            thermal_width = (lambda_obs * thermal_b /
                             speed_of_light_cgs).convert_to_units("angstrom")

            # Sanitize units for faster runtime of the tau_profile machinery.
            lambda_0 = line['wavelength'].d  # line's rest frame; angstroms
            cdens = column_density.in_units("cm**-2").d  # cm**-2
            thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
            dlambda = delta_lambda.d  # lambda offset; angstroms
            if use_peculiar_velocity:
                vlos = field_data['velocity_los'].in_units("km/s").d  # km/s
            else:
                vlos = np.zeros(field_data['temperature'].size)

            # When we actually deposit the voigt profile, sometimes we will
            # have underresolved lines (ie lines with smaller widths than
            # the spectral bin size).  Here, we create virtual wavelength bins
            # small enough in width to well resolve each line, deposit the
            # voigt profile into them, then numerically integrate their tau
            # values and sum them to redeposit them into the actual spectral
            # bins.

            # virtual bins (vbins) will be:
            # 1) <= the bin_width; assures at least as good as spectral bins
            # 2) <= 1/10th the thermal width; assures resolving voigt profiles
            #   (actually 1/subgrid_resolution value, default is 1/10)
            # 3) a bin width will be divisible by vbin_width times a power of
            #    10; this will assure we don't get spikes in the deposited
            #    spectra from uneven numbers of vbins per bin
            resolution = thermal_width / self.bin_width
            n_vbins_per_bin = (10**(np.ceil(
                np.log10(subgrid_resolution / resolution)).clip(
                    0, np.inf))).astype('int')
            vbin_width = self.bin_width.d / n_vbins_per_bin

            # a note to the user about which line components are unresolved
            if (thermal_width < self.bin_width).any():
                mylog.info(
                    "%d out of %d line components will be " +
                    "deposited as unresolved lines.",
                    (thermal_width < self.bin_width).sum(), n_absorbers)

            # provide a progress bar with information about lines processed
            pbar = get_pbar("Adding line - %s [%f A]: " % \
                            (line['label'], line['wavelength']), n_absorbers)

            # for a given transition, step through each location in the
            # observed spectrum where it occurs and deposit a voigt profile
            for i in parallel_objects(np.arange(n_absorbers), njobs=-1):

                # if there is a ray element with temperature = 0 or column
                # density = 0, skip it
                if (thermal_b[i] == 0.) or (cdens[i] == 0.):
                    pbar.update(i)
                    continue

                # the virtual window into which the line is deposited initially
                # spans a region of 2 coarse spectral bins
                # (one on each side of the center_index) but the window
                # can expand as necessary.
                # it will continue to expand until the tau value in the far
                # edge of the wings is less than the min_tau value or it
                # reaches the edge of the spectrum
                window_width_in_bins = 2

                while True:
                    left_index = (center_index[i] - window_width_in_bins // 2)
                    right_index = (center_index[i] + window_width_in_bins // 2)
                    n_vbins = (right_index - left_index) * n_vbins_per_bin[i]

                    # the array of virtual bins in lambda space
                    vbins = \
                        np.linspace(self.lambda_min + self.bin_width.d * left_index,
                                    self.lambda_min + self.bin_width.d * right_index,
                                    n_vbins, endpoint=False)

                    # the virtual bins and their corresponding opacities
                    vbins, vtau = \
                        tau_profile(
                            lambda_0, line['f_value'], line['gamma'],
                            thermb[i], cdens[i],
                            delta_lambda=dlambda[i], lambda_bins=vbins)

                    # If tau has not dropped below min tau threshold by the
                    # edges (ie the wings), then widen the wavelength
                    # window and repeat process.
                    if (vtau[0] < min_tau and vtau[-1] < min_tau):
                        break
                    window_width_in_bins *= 2

                # numerically integrate the virtual bins to calculate a
                # virtual equivalent width; then sum the virtual equivalent
                # widths and deposit into each spectral bin
                vEW = vtau * vbin_width[i]
                EW = np.zeros(right_index - left_index)
                EW_indices = np.arange(left_index, right_index)
                for k, val in enumerate(EW_indices):
                    EW[k] = vEW[n_vbins_per_bin[i] * k: \
                                n_vbins_per_bin[i] * (k + 1)].sum()
                EW = EW / self.bin_width.d

                # only deposit EW bins that actually intersect the original
                # spectral wavelength range (i.e. lambda_field)

                # if EW bins don't intersect the original spectral range at all
                # then skip the deposition
                if ((left_index >= self.n_lambda) or \
                    (right_index < 0)):
                    pbar.update(i)
                    continue

                # otherwise, determine how much of the original spectrum
                # is intersected by the expanded line window to be deposited,
                # and deposit the Equivalent Width data into that intersecting
                # window in the original spectrum's tau
                else:
                    intersect_left_index = max(left_index, 0)
                    intersect_right_index = min(right_index, self.n_lambda - 1)
                    self.tau_field[intersect_left_index:intersect_right_index] \
                        += EW[(intersect_left_index - left_index): \
                              (intersect_right_index - left_index)]

                # write out absorbers to file if the column density of
                # an absorber is greater than the specified "label_threshold"
                # of that absorption line
                if output_absorbers_file and \
                   line['label_threshold'] is not None and \
                   cdens[i] >= line['label_threshold']:

                    if use_peculiar_velocity:
                        peculiar_velocity = vlos[i]
                    else:
                        peculiar_velocity = 0.0
                    self.absorbers_list.append({
                        'label':
                        line['label'],
                        'wavelength': (lambda_0 + dlambda[i]),
                        'column_density':
                        column_density[i],
                        'b_thermal':
                        thermal_b[i],
                        'redshift':
                        redshift[i],
                        'redshift_eff':
                        redshift_eff[i],
                        'v_pec':
                        peculiar_velocity
                    })
                pbar.update(i)
            pbar.finish()

            del column_density, delta_lambda, lambda_obs, center_index, \
                thermal_b, thermal_width, cdens, thermb, dlambda, \
                vlos, resolution, vbin_width, n_vbins, n_vbins_per_bin

        comm = _get_comm(())
        self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")
        if output_absorbers_file:
            self.absorbers_list = comm.par_combine_object(self.absorbers_list,
                                                          "cat",
                                                          datatype="list")
Example #18
 def _count_particles(self, data_file):
     pcount = self._handle['x'].size
     if (pcount > 1e9):
         mylog.warn("About to load %i particles into memory. " % (pcount) +
                    "You may want to consider a midx-enabled load")
     return {'dark_matter': pcount}