def can_run_ds(ds_fn, file_check=False):
    if isinstance(ds_fn, Dataset):
        return AnswerTestingTest.result_storage is not None
    path = ytcfg.get("yt", "test_data_dir")
    if not os.path.isdir(path):
        return False
    with temp_cwd(path):
        if file_check:
            return os.path.isfile(ds_fn) and \
                AnswerTestingTest.result_storage is not None
        try:
            load(ds_fn)
        except YTOutputNotIdentified:
            return False
    return AnswerTestingTest.result_storage is not None
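In yt's answer-testing setup, can_run_ds is the check behind dataset-gating decorators. A minimal sketch of that pattern, assuming nose as the test runner (requires_ds_sketch is a hypothetical name, not yt's own decorator):

import functools
from nose.plugins.skip import SkipTest

def requires_ds_sketch(ds_fn, file_check=False):
    # Hypothetical decorator: skip the wrapped test when the dataset
    # cannot be loaded or no answer-test result storage is configured.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if not can_run_ds(ds_fn, file_check=file_check):
                raise SkipTest("%s not available" % ds_fn)
            return func(*args, **kwargs)
        return wrapper
    return decorator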
    def _check_for_outputs(self, potential_outputs):
        """
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.", 
                     len(potential_outputs))

        my_outputs = {}
        for my_storage, output in parallel_objects(potential_outputs, 
                                                   storage=my_outputs):
            if self.parameters['DataDumpDir'] in output:
                dir_key = self.parameters['DataDumpDir']
                output_key = self.parameters['DataDumpName']
            else:
                dir_key = self.parameters['RedshiftDumpDir']
                output_key = self.parameters['RedshiftDumpName']
            index = output[output.find(dir_key) + len(dir_key):]
            filename = os.path.join(self.parameters['GlobalDir'],
                                    "%s%s" % (dir_key, index),
                                    "%s%s" % (output_key, index))
            if os.path.exists(filename):
                try:
                    ds = load(filename)
                    if ds is not None:
                        my_storage.result = {'filename': filename,
                                             'time': ds.current_time.in_units("s")}
                        if ds.cosmological_simulation:
                            my_storage.result['redshift'] = ds.current_redshift
                except YTOutputNotIdentified:
                    mylog.error('Failed to load %s', filename)
        my_outputs = [my_output for my_output in my_outputs.values() \
                      if my_output is not None]

        return my_outputs
def test_rockstar():
    from mpi4py import MPI
    filename = os.path.join(os.path.dirname(__file__),
                            "run_rockstar.py")
    comm = MPI.COMM_SELF.Spawn(sys.executable,
                               args=[filename],
                               maxprocs=3)
    comm.Disconnect()

    h1 = "rockstar_halos/halos_0.0.bin"
    d1 = load(h1)
    for field in _fields:
        yield FieldValuesTest(d1, field, particle_type=True, decimals=1)
    h2 = "rockstar_halos/halos_1.0.bin"
    d2 = load(h2)
    for field in _fields:
        yield FieldValuesTest(d2, field, particle_type=True, decimals=1)
def test_nprocs():
    ytcfg["yt","skip_dataset_cache"] = "True"

    ds1 = load(sloshing, units_override=uo_sloshing)
    sp1 = ds1.sphere("c", (100., "kpc"))
    prj1 = ds1.proj("density", 0)
    ds2 = load(sloshing, units_override=uo_sloshing, nprocs=8)
    sp2 = ds2.sphere("c", (100., "kpc"))
    prj2 = ds2.proj("density", 0)

    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
    yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
    for ax in "xyz":
        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
    yield assert_allclose_units, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
    yield assert_equal, prj1["density"], prj2["density"]

    ytcfg["yt","skip_dataset_cache"] = "False"
def units_override_check(fn):
    ytcfg["yt","skip_dataset_cache"] = "True"
    units_list = ["length","time","mass","velocity",
                  "magnetic","temperature"]
    ds1 = load(fn)
    units_override = {}
    attrs1 = []
    attrs2 = []
    for u in units_list:
        unit_attr = getattr(ds1, "%s_unit" % u, None)
        if unit_attr is not None:
            attrs1.append(unit_attr)
            units_override["%s_unit" % u] = (unit_attr.v, str(unit_attr.units))
    del ds1
    ds2 = load(fn, units_override=units_override)
    ytcfg["yt","skip_dataset_cache"] = "False"
    assert len(ds2.units_override) > 0
    for u in units_list:
        unit_attr = getattr(ds2, "%s_unit" % u, None)
        if unit_attr is not None:
            attrs2.append(unit_attr)
    yield assert_equal, attrs1, attrs2
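Because units_override_check ends in a yield, calling it returns a generator; a nose-style driver simply re-yields its items. A sketch, with an illustrative dataset path:

sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"

def test_units_override():
    # Re-yield the (assert_func, actual, expected) tuple produced above.
    for test in units_override_check(sloshing):
        yield test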
def data_dir_load(ds_fn, cls=None, args=None, kwargs=None):
    args = args or ()
    kwargs = kwargs or {}
    path = ytcfg.get("yt", "test_data_dir")
    if isinstance(ds_fn, Dataset):
        return ds_fn
    if not os.path.isdir(path):
        return False
    with temp_cwd(path):
        if cls is None:
            ds = load(ds_fn, *args, **kwargs)
        else:
            ds = cls(ds_fn, *args, **kwargs)
        # Accessing ds.index here forces index construction so that any
        # problems surface inside data_dir_load.
        ds.index
        return ds
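data_dir_load resolves ds_fn against the configured test_data_dir and hands back a dataset whose index is already built (or False if the data directory is missing). A minimal usage sketch with an illustrative path:

g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
ds = data_dir_load(g30)
if ds is not False:
    # Index construction already happened inside data_dir_load.
    print(ds.domain_width.in_units("kpc"))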
def test_halo_finders():
    from mpi4py import MPI
    filename = os.path.join(os.path.dirname(__file__), "run_halo_finder.py")
    for method in methods:
        comm = MPI.COMM_SELF.Spawn(
            sys.executable, args=[filename, method], maxprocs=methods[method])
        comm.Disconnect()

        fn = os.path.join(
            os.path.dirname(__file__), "halo_catalogs", method,
            "%s.0.h5" % method)
        ds = load(fn)
        for field in _fields:
            yield FieldValuesTest(
                ds, field, particle_type=True, decimals=decimals[method])
    def __init__(self, parameter_filename, simulation_type=None,
                 near_redshift=None, far_redshift=None,
                 use_minimum_datasets=True, deltaz_min=0.0,
                 minimum_coherent_box_fraction=0.0,
                 time_data=True, redshift_data=True,
                 find_outputs=False, load_kwargs=None):

        self.near_redshift = near_redshift
        self.far_redshift = far_redshift
        self.use_minimum_datasets = use_minimum_datasets
        self.deltaz_min = deltaz_min
        self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
        self.parameter_filename = parameter_filename
        if load_kwargs is None:
            self.load_kwargs = {}
        else:
            self.load_kwargs = load_kwargs
        self.light_ray_solution = []
        self._data = {}

        # Make a light ray from a single, given dataset.        
        if simulation_type is None:
            ds = load(parameter_filename, **self.load_kwargs)
            if ds.cosmological_simulation:
                redshift = ds.current_redshift
                self.cosmology = Cosmology(
                    hubble_constant=ds.hubble_constant,
                    omega_matter=ds.omega_matter,
                    omega_lambda=ds.omega_lambda,
                    unit_registry=ds.unit_registry)
            else:
                redshift = 0.
            self.light_ray_solution.append({"filename": parameter_filename,
                                            "redshift": redshift})

        # Make a light ray from a simulation time-series.
        else:
            # Get list of datasets for light ray solution.
            CosmologySplice.__init__(self, parameter_filename, simulation_type,
                                     find_outputs=find_outputs)
            self.light_ray_solution = \
              self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                           minimal=self.use_minimum_datasets,
                                           deltaz_min=self.deltaz_min,
                                           time_data=time_data,
                                           redshift_data=redshift_data)
    def _check_for_outputs(self, potential_outputs):
        r"""
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.", 
                     len(potential_outputs))

        my_outputs = {}
        for my_storage, output in parallel_objects(potential_outputs, 
                                                   storage=my_outputs):
            if os.path.exists(output):
                try:
                    ds = load(output)
                    if ds is not None:
                        my_storage.result = {"filename": output,
                                             "time": ds.current_time.in_units("s")}
                        if ds.cosmological_simulation:
                            my_storage.result["redshift"] = ds.current_redshift
                except YTOutputNotIdentified:
                    mylog.error("Failed to load %s", output)
        my_outputs = [my_output for my_output in my_outputs.values() \
                      if my_output is not None]
        return my_outputs
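The my_storage/parallel_objects idiom used in both _check_for_outputs implementations is generic: each rank assigns to its storage object's result, and the storage dict is combined across ranks after the loop. A minimal sketch of the same idiom in isolation:

from yt.utilities.parallel_tools.parallel_analysis_interface import \
    parallel_objects

my_results = {}
for sto, item in parallel_objects(range(4), storage=my_results):
    # Each task stores its result; entries left as None are filtered
    # out by the callers above.
    sto.result = item**2
# After the loop, my_results maps a task index to each stored result.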
Example #10
def test_covering_grid():
    # We decompose in different ways
    for level in [0, 1, 2]:
        for nprocs in [1, 2, 4, 8]:
            ds = fake_random_ds(16, nprocs = nprocs)
            axis_name = ds.coordinates.axis_name
            dn = ds.refine_by**level 
            cg = ds.covering_grid(level, [0.0, 0.0, 0.0],
                    dn * ds.domain_dimensions)
            # Test coordinate generation
            assert_equal(np.unique(cg["d%s" % axis_name[0]]).size, 1)
            xmi = cg[axis_name[0]].min()
            xma = cg[axis_name[0]].max()
            dx = cg["d%s" % axis_name[0]].flat[0:1]
            edges = ds.arr([[0,1],[0,1],[0,1]], 'code_length')
            assert_equal(xmi, edges[0,0] + dx/2.0)
            assert_equal(xmi, cg[axis_name[0]][0,0,0])
            assert_equal(xmi, cg[axis_name[0]][0,1,1])
            assert_equal(xma, edges[0,1] - dx/2.0)
            assert_equal(xma, cg[axis_name[0]][-1,0,0])
            assert_equal(xma, cg[axis_name[0]][-1,1,1])
            assert_equal(np.unique(cg["d%s" % axis_name[1]]).size, 1)
            ymi = cg[axis_name[1]].min()
            yma = cg[axis_name[1]].max()
            dy = cg["d%s" % axis_name[1]][0]
            assert_equal(ymi, edges[1,0] + dy/2.0)
            assert_equal(ymi, cg[axis_name[1]][0,0,0])
            assert_equal(ymi, cg[axis_name[1]][1,0,1])
            assert_equal(yma, edges[1,1] - dy/2.0)
            assert_equal(yma, cg[axis_name[1]][0,-1,0])
            assert_equal(yma, cg[axis_name[1]][1,-1,1])
            assert_equal(np.unique(cg["d%s" % axis_name[2]]).size, 1)
            zmi = cg[axis_name[2]].min()
            zma = cg[axis_name[2]].max()
            dz = cg["d%s" % axis_name[2]][0]
            assert_equal(zmi, edges[2,0] + dz/2.0)
            assert_equal(zmi, cg[axis_name[2]][0,0,0])
            assert_equal(zmi, cg[axis_name[2]][1,1,0])
            assert_equal(zma, edges[2,1] - dz/2.0)
            assert_equal(zma, cg[axis_name[2]][0,0,-1])
            assert_equal(zma, cg[axis_name[2]][1,1,-1])
            # Now we test other attributes
            assert_equal(cg["ones"].max(), 1.0)
            assert_equal(cg["ones"].min(), 1.0)
            assert_equal(cg["grid_level"], level)
            assert_equal(cg["cell_volume"].sum(), ds.domain_width.prod())
            for g in ds.index.grids:
                di = g.get_global_startindex()
                dd = g.ActiveDimensions
                for i in range(dn):
                    f = cg["density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
                    assert_equal(f, g["density"])

    # More tests for cylindrical geometry
    for fn in [cyl_2d, cyl_3d]:
        ds = load(fn)
        ad = ds.all_data()
        upper_ad = ad.cut_region(["obj['z'] > 0"])
        sp = ds.sphere((0, 0, 0), 0.5 * ds.domain_width[0],
                       data_source=upper_ad)
        sp.quantities.total_mass()
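The covering-grid checks above reduce to a few lines: pick a level, request the full domain at that resolution, and read fields back as uniform arrays. A standalone sketch on a fake in-memory dataset:

from yt.testing import fake_random_ds

ds = fake_random_ds(16)
level = 1
dims = ds.refine_by**level * ds.domain_dimensions
# One uniform array covering the whole domain at the requested level.
cg = ds.covering_grid(level, ds.domain_left_edge, dims)
assert cg["density"].shape == tuple(dims)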
Example #11
    def generate_events(self, area, exp_time, angular_width,
                        source_model, sky_center, parameters=None,
                        velocity_fields=None, absorb_model=None,
                        nH=None, no_shifting=False, sigma_pos=None,
                        prng=None):
        """
        Generate projected events from a light cone simulation. 

        Parameters
        ----------
        area : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The collecting area to determine the number of events. If units are
            not specified, it is assumed to be in cm^2.
        exp_time : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The exposure time to determine the number of events. If units are
            not specified, it is assumed to be in seconds.
        angular_width : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The angular width of the light cone simulation. If units are not
            specified, it is assumed to be in degrees.
        source_model : :class:`~pyxsim.source_models.SourceModel`
            A source model used to generate the events.
        sky_center : array-like
            Center RA, Dec of the events in degrees.
        parameters : dict, optional
            A dictionary of parameters to be passed for the source model to use,
            if necessary.
        velocity_fields : list of fields
            The yt fields to use for the velocity. If not specified, the following will
            be assumed:
            ['velocity_x', 'velocity_y', 'velocity_z'] for grid datasets
            ['particle_velocity_x', 'particle_velocity_y', 'particle_velocity_z'] for particle datasets
        absorb_model : string or :class:`~pyxsim.spectral_models.AbsorptionModel` 
            A model for foreground galactic absorption, to simulate the absorption
            of events before being detected. This cannot be applied here if you 
            already did this step previously in the creation of the 
            :class:`~pyxsim.photon_list.PhotonList` instance. Known options for 
            strings are "wabs" and "tbabs".
        nH : float, optional
            The foreground column density in units of 10^22 cm^{-2}. Only used if
            absorption is applied.
        no_shifting : boolean, optional
            If set, the photon energies will not be Doppler shifted.
        sigma_pos : float, optional
            Apply a gaussian smoothing operation to the sky positions of the
            events. This may be useful when the binned events appear blocky due
            to their uniform distribution within simulation cells. However, this
            will move the events away from their originating position on the
            sky, and so may distort surface brightness profiles and/or spectra.
            Should probably only be used for visualization purposes. Supply a
            float here to smooth with a standard deviation with this fraction
            of the cell size. Default: None
        prng : integer or :class:`~numpy.random.RandomState` object
            A pseudo-random number generator. Typically will only be specified
            if you have a reason to generate the same set of random numbers, such as for a
            test. Default is to use the :mod:`numpy.random` module.
        """
        prng = parse_prng(prng)

        area = parse_value(area, "cm**2")
        exp_time = parse_value(exp_time, "s")
        aw = parse_value(angular_width, "deg")

        tot_events = defaultdict(list)

        for output in self.light_cone_solution:
            ds = load(output["filename"])
            ax = output["projection_axis"]
            c = output["projection_center"]*ds.domain_width + ds.domain_left_edge
            le = c.copy()
            re = c.copy()
            width = ds.quan(aw*output["box_width_per_angle"], "unitary").to("code_length")
            depth = ds.domain_width[ax].in_units("code_length")*output["box_depth_fraction"]
            le[ax] -= 0.5*depth
            re[ax] += 0.5*depth
            for off_ax in axes_lookup[ax]:
                le[off_ax] -= 0.5*width
                re[off_ax] += 0.5*width
            reg = ds.box(le, re)
            photons = PhotonList.from_data_source(reg, output['redshift'], area,
                                                  exp_time, source_model,
                                                  parameters=parameters,
                                                  center=c,
                                                  velocity_fields=velocity_fields,
                                                  cosmology=ds.cosmology)
            if sum(photons["num_photons"]) > 0:
                events = photons.project_photons("xyz"[ax], sky_center,
                                                 absorb_model=absorb_model, nH=nH,
                                                 no_shifting=no_shifting, 
                                                 sigma_pos=sigma_pos,
                                                 prng=prng)
                if events.num_events > 0:
                    tot_events["xsky"].append(events["xsky"])
                    tot_events["ysky"].append(events["ysky"])
                    tot_events["eobs"].append(events["eobs"])
                del events

            del photons

        parameters = {"exp_time": exp_time,
                      "area": area, 
                      "sky_center": YTArray(sky_center, "deg")}

        for key in tot_events:
            tot_events[key] = uconcatenate(tot_events[key])

        return EventList(tot_events, parameters)
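A hedged sketch of driving generate_events end to end, assuming a pyxsim-style thermal source model and an already-constructed light cone object lc; the model arguments and numbers here are illustrative, not prescriptive:

# Illustrative only: `lc` exposes the generate_events() defined above;
# ThermalSourceModel("apec", emin, emax, nchan, ...) follows pyxsim's
# documented constructor pattern.
source_model = ThermalSourceModel("apec", 0.1, 10.0, 1000, Zmet=0.3)
events = lc.generate_events((2000.0, "cm**2"), (100.0, "ks"), (1.0, "deg"),
                            source_model, (30.0, 45.0),
                            absorb_model="wabs", nH=0.02, prng=33)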
Example #12
def test_blast_override():
    # verify that overriding units causes derived unit values to be updated.
    # see issue #1259
    ds = load(blast, units_override=uo_blast)
    assert_equal(float(ds.magnetic_unit.in_units('gauss')),
                 5.478674679698131e-07)
Example #13
    def make_spectrum(self,
                      input_file,
                      output_file=None,
                      line_list_file=None,
                      output_absorbers_file=None,
                      use_peculiar_velocity=True,
                      subgrid_resolution=10,
                      observing_redshift=0.,
                      njobs="auto"):
        """
        Make spectrum from ray data using the line list.

        Parameters
        ----------

        input_file : string or dataset
           path to input ray data or a loaded ray dataset
        output_file : optional, string
           Option to save a file containing the wavelength, flux, and optical
           depth fields.  File formats are chosen based on the filename
           extension. ``.h5`` for hdf5, ``.fits`` for fits, and everything
           else is ASCII.
           Default: None
        output_absorbers_file : optional, string
           Option to save a text file containing all of the absorbers and
           corresponding wavelength and redshift information.
           For parallel jobs, combining the lines lists can be slow so it
           is recommended to set to None in such circumstances.
           Default: None
        use_peculiar_velocity : optional, bool
           if True, include peculiar velocity for calculating doppler redshift
           to shift lines.  Requires similar flag to be set in LightRay
           generation.
           Default: True
        subgrid_resolution : optional, int
           When a line is being added that is unresolved (ie its thermal
           width is less than the spectral bin width), the voigt profile of
           the line is deposited into an array of virtual wavelength bins at
           higher resolution.  The optical depth from these virtual bins is
           integrated and then added to the coarser spectral wavelength bin.
           The subgrid_resolution value determines the ratio between the
           thermal width and the bin width of the virtual bins.  Increasing
           this value yields smaller virtual bins, which increases accuracy,
           but is more expensive.  A value of 10 yields accuracy to the 4th
           significant digit in tau.
           Default: 10
        observing_redshift : optional, float
           This is the redshift at which the observer is observing
           the absorption spectrum.
           Default: 0
        njobs : optional, int or "auto"
           the number of process groups into which the loop over
           absorption lines will be divided.  If set to -1, each
           absorption line will be deposited by exactly one processor.
           If njobs is set to a value less than the total number of
           available processors (N), then the deposition of an
           individual line will be parallelized over (N / njobs)
           processors.  If set to "auto", it will first try to
           parallelize over the list of lines and only parallelize
           the line deposition if there are more processors than
           lines.  This is the optimal strategy for parallelizing
           spectrum generation.
           Default: "auto"
        """
        if line_list_file is not None:
            mylog.info("'line_list_file' keyword is deprecated. Please use " \
                       "'output_absorbers_file'.")
            output_absorbers_file = line_list_file

        input_fields = ['dl', 'redshift', 'temperature']
        field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
        if use_peculiar_velocity:
            input_fields.append('velocity_los')
            input_fields.append('redshift_eff')
            field_units["velocity_los"] = "cm/s"
            field_units["redshift_eff"] = ""
        if observing_redshift != 0.:
            input_fields.append('redshift_dopp')
            field_units["redshift_dopp"] = ""
        for feature in self.line_list + self.continuum_list:
            if feature['field_name'] not in input_fields:
                input_fields.append(feature['field_name'])
                field_units[feature["field_name"]] = "cm**-3"

        if isinstance(input_file, string_types):
            input_ds = load(input_file)
        else:
            input_ds = input_file
        field_data = input_ds.all_data()

        # temperature field required to calculate voigt profile widths
        if ('temperature' not in input_ds.derived_field_list) and \
           (('gas', 'temperature') not in input_ds.derived_field_list):
            raise RuntimeError(
                "('gas', 'temperature') field required to be present in %s "
                "for AbsorptionSpectrum to function." % input_file)

        self.tau_field = np.zeros(self.lambda_field.size)
        self.absorbers_list = []

        if njobs == "auto":
            comm = _get_comm(())
            njobs = min(comm.size, len(self.line_list))

        mylog.info("Creating spectrum")
        self._add_lines_to_spectrum(field_data,
                                    use_peculiar_velocity,
                                    output_absorbers_file,
                                    subgrid_resolution=subgrid_resolution,
                                    observing_redshift=observing_redshift,
                                    njobs=njobs)
        self._add_continua_to_spectrum(field_data,
                                       use_peculiar_velocity,
                                       observing_redshift=observing_redshift)

        self.flux_field = np.exp(-self.tau_field)

        if output_file is None:
            pass
        elif output_file.endswith('.h5'):
            self._write_spectrum_hdf5(output_file)
        elif output_file.endswith('.fits'):
            self._write_spectrum_fits(output_file)
        else:
            self._write_spectrum_ascii(output_file)
        if output_absorbers_file is not None:
            self._write_absorbers_file(output_absorbers_file)

        del field_data
        return (self.lambda_field, self.flux_field)
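A minimal sketch of the call sequence around make_spectrum, mirroring the add_line usage that appears later in this collection (file names are illustrative):

sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
# Lyman-alpha: label, number-density field, wavelength [Angstrom],
# f-value, gamma, and atomic mass.
sp.add_line("HI", "H_p0_number_density", 1215.67, 0.4164, 6.265e8, 1.00794)
wavelength, flux = sp.make_spectrum("ray.h5",
                                    output_file="spectrum.h5",
                                    use_peculiar_velocity=True)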
Example #14
    def test_enzo_small_simple(self):
        """
        This is an answer test, which compares the results of this test
        against answers generated from a previous version of the code.

        This test generates a COS spectrum from a single Enzo dataset
        using a simple ray and compare the ray and spectral output data
        against a known answer.

        """

        # Set the dataset filename, load it into yt and define the trajectory
        # of the LightRay to cross the box from one corner to the other.
        ds = load(os.path.join(enzo_small, 'RD0009/RD0009'))
        ray_start = ds.domain_left_edge
        ray_end = ds.domain_right_edge

        # Make a LightRay object including all necessary fields so you can add
        # all H, C, N, O, and Mg fields to the resulting spectrum from your dataset.
        # Save LightRay to ray.h5 and use it locally as ray object.
        ray_fn = 'enzo_small_simple_ray.h5'
        ray = make_simple_ray(ds,
                              start_position=ray_start,
                              end_position=ray_end,
                              data_filename=ray_fn,
                              lines=['H', 'C', 'N', 'O', 'Mg'],
                              ftype='gas')

        # Now use the ray object to actually generate an absorption spectrum
        # Use the settings (spectral range, LSF, and spectral resolution) for COS
        # And save it as an output hdf5 file and plot it to an image.
        sg = SpectrumGenerator('COS')
        sg.make_spectrum(ray, lines=['H', 'C', 'N', 'O', 'Mg'])
        raw_file = 'enzo_small_simple_spec_raw.h5'
        raw_file_compare = os.path.join(test_results_dir, raw_file)
        sg.save_spectrum(raw_file)
        sg.plot_spectrum('enzo_small_simple_spec_raw.png')

        # "Final" spectrum with added quasar, MW background, applied line-spread
        # function, and added gaussian noise (SNR=30)
        # Save as a text file and plot it to an image.
        sg.add_qso_spectrum()
        sg.add_milky_way_foreground()
        sg.apply_lsf()
        sg.add_gaussian_noise(30, seed=1)
        final_file = 'enzo_small_simple_spec_final.h5'
        final_file_compare = os.path.join(test_results_dir, final_file)
        sg.save_spectrum(final_file)
        sg.plot_spectrum('enzo_small_simple_spec_final.png')

        if generate_results:
            os.rename(raw_file, raw_file_compare)
            os.rename(final_file, final_file_compare)

        else:
            old_spec = h5py.File(raw_file_compare, 'r')
            new_spec = h5py.File(raw_file, 'r')
            for key in old_spec.keys():
                assert_almost_equal(new_spec[key][()], old_spec[key][()], \
                                    decimal=err_precision,
                                    err_msg='Raw spectrum array does not match '+\
                                    'for enzo_small_simple answer test')
            old_spec.close()
            new_spec.close()

            old_spec = h5py.File(final_file_compare, 'r')
            new_spec = h5py.File(final_file, 'r')
            for key in old_spec.keys():
                assert_almost_equal(new_spec[key][()], old_spec[key][()], \
                                    decimal=err_precision,
                                    err_msg='Final spectrum array does not match '+\
                                    'for enzo_small_simple answer test')
            old_spec.close()
            new_spec.close()
Example #15
    def project_light_cone(self, field_of_view, image_resolution, field,
                           weight_field=None, photon_field=False,
                           save_stack=True, save_final_image=True,
                           save_slice_images=False,
                           cmap_name="algae",
                           njobs=1, dynamic=False):
        r"""Create projections for light cone, then add them together.

        Parameters
        ----------
        field_of_view : YTQuantity or tuple of (float, str)
            The field of view of the image and the units.
        image_resolution : YTQuantity or tuple of (float, str)
            The size of each image pixel and the units.
        field : string
            The projected field.
        weight_field : string
            the weight field of the projection.  This has the same meaning as
            in standard projections.
            Default: None.
        photon_field : bool
            if True, the projection data for each slice is decremented by 4 Pi
            R^2`, where R is the luminosity distance between the observer and
            the slice redshift.
            Default: False.
        save_stack : bool
            if True, the light cone data including each individual
            slice is written to an hdf5 file.
            Default: True.
        save_final_image : bool
            if True, save an image of the final light cone projection.
            Default: True.
        save_slice_images : bool
            save images for each individual projection slice.
            Default: False.
        cmap_name : string
            color map for images.
            Default: "algae".
        njobs : int
            The number of parallel jobs over which the light cone projection
            will be split.  Choose -1 for one processor per individual
            projection and 1 to have all processors work together on each
            projection.
            Default: 1.
        dynamic : bool
            If True, use dynamic load balancing to create the projections.
            Default: False.

        """

        if isinstance(field_of_view, tuple) and len(field_of_view) == 2:
            field_of_view = self.simulation.quan(field_of_view[0],
                                                 field_of_view[1])
        elif not isinstance(field_of_view, YTArray):
            raise RuntimeError("field_of_view argument must be either a "
                               "YTQuantity or a tuple of type (float, str).")
        if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
            image_resolution = self.simulation.quan(image_resolution[0],
                                                    image_resolution[1])
        elif not isinstance(image_resolution, YTArray):
            raise RuntimeError("image_resolution argument must be either a "
                               "YTQuantity or a tuple of type (float, str).")
        
        # Calculate number of pixels on a side.
        pixels = (field_of_view / image_resolution).in_units("")

        # Clear projection stack.
        projection_stack = []
        projection_weight_stack = []
        if "object" in self.light_cone_solution[-1]:
            del self.light_cone_solution[-1]["object"]

        # for q, output in enumerate(self.light_cone_solution):
        all_storage = {}
        for my_storage, output in parallel_objects(self.light_cone_solution,
                                                   storage=all_storage,
                                                   dynamic=dynamic):
            output["object"] = load(output["filename"])
            output["object"].parameters.update(self.set_parameters)

            # Calculate fraction of box required for width corresponding to
            # requested image size.
            proper_box_size = self.simulation.box_size / \
              (1.0 + output["redshift"])
            output["box_width_fraction"] = (output["box_width_per_angle"] *
                                            field_of_view).in_units("")
            
            frb = _light_cone_projection(output, field, pixels,
                                         weight_field=weight_field)

            if photon_field:
                # Decrement the flux by the luminosity distance.
                # Assume field in frb is in erg/s/cm^2/Hz
                dL = self.cosmology.luminosity_distance(self.observer_redshift,
                                                        output["redshift"])
                proper_box_size = self.simulation.box_size / \
                  (1.0 + output["redshift"])
                pixel_area = (proper_box_size.in_cgs() / pixels)**2  # in proper cm^2
                factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                mylog.info("Distance to slice = %s" % dL)
                frb[field] *= factor  # in erg/s/cm^2/Hz on the observer's image plane.

            if weight_field is None:
                my_storage.result = {"field": frb[field]}
            else:
                my_storage.result = {"field": (frb[field] *
                                               frb["weight_field"]),
                                     "weight_field": frb["weight_field"]}

            del output["object"]

        # Combine results from each slice.
        all_slices = list(all_storage.keys())
        all_slices.sort()
        for my_slice in all_slices:
            if save_slice_images:
                name = os.path.join(self.output_dir,
                                    "%s_%04d_%04d" %
                                    (self.output_prefix,
                                     my_slice, len(self.light_cone_solution)))
                if weight_field is None:
                    my_image = all_storage[my_slice]["field"]
                else:
                    my_image = all_storage[my_slice]["field"] / \
                      all_storage[my_slice]["weight_field"]
                only_on_root(write_image, np.log10(my_image),
                             "%s_%s.png" % (name, field), cmap_name=cmap_name)

            projection_stack.append(all_storage[my_slice]["field"])
            if weight_field is not None:
                projection_weight_stack.append(all_storage[my_slice]["weight_field"])

        projection_stack = self.simulation.arr(projection_stack)
        projection_weight_stack = self.simulation.arr(projection_weight_stack)
                
        # Add up slices to make light cone projection.
        if weight_field is None:
            light_cone_projection = projection_stack.sum(axis=0)
        else:
            light_cone_projection = \
              projection_stack.sum(axis=0) / \
              projection_weight_stack.sum(axis=0)

        filename = os.path.join(self.output_dir, self.output_prefix)

        # Write image.
        if save_final_image:
            only_on_root(write_image, np.log10(light_cone_projection),
                         "%s_%s.png" % (filename, field), cmap_name=cmap_name)

        # Write stack to hdf5 file.
        if save_stack:
            self._save_light_cone_stack(field, weight_field,
                projection_stack, projection_weight_stack,
                filename=filename,
                attrs={"field_of_view": str(field_of_view),
                       "image_resolution": str(image_resolution)})
Example #16
    def __iter__(self):
        for o in self._pre_outputs:
            fn, step = o
            ds = load(fn, step=step)
            self._setup_function(ds)
            yield ds
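This __iter__ is the hook behind the usual time-series loop; a sketch of the consuming side, assuming a glob that matches a series of outputs:

import yt

ts = yt.DatasetSeries("DD????/DD????")
for ds in ts:
    # Each iteration loads one output via load() and runs the setup
    # function before yielding the dataset.
    print(ds.current_time)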
Example #17
    def test_absorption_spectrum_with_zero_field(self):
        """
        This test generates an absorption spectrum with some
        particle dataset
        """

        ds = load(FIRE)
        lr = LightRay(ds)

        # Define species and associated parameters to add to continuum
        # Parameters used for both adding the transition to the spectrum
        # and for fitting
        # Note that for single species that produce multiple lines
        # (as in the OVI doublet), 'numLines' will be equal to the number
        # of lines, and f,gamma, and wavelength will have multiple values.

        HI_parameters = {
            'name': 'HI',
            'field': 'H_p0_number_density',
            'f': [.4164],
            'Gamma': [6.265E8],
            'wavelength': [1215.67],
            'mass': 1.00794,
            'numLines': 1,
            'maxN': 1E22, 'minN': 1E11,
            'maxb': 300, 'minb': 1,
            'maxz': 6, 'minz': 0,
            'init_b': 30,
            'init_N': 1E14
        }

        species_dicts = {'HI': HI_parameters}


        # Get all fields that need to be added to the light ray
        fields = [('gas','temperature')]
        for s, params in species_dicts.items():
            fields.append(params['field'])

        # With a single dataset, a start_position and
        # end_position or trajectory must be given.
        # Trajectory should be given as (r, theta, phi)
        lr.make_light_ray(
            start_position=ds.arr([0., 0., 0.], 'unitary'),
            end_position=ds.arr([1., 1., 1.], 'unitary'),
            solution_filename='test_lightraysolution.txt',
            data_filename='test_lightray.h5',
            fields=fields)

        # Create an AbsorptionSpectrum object extending from
        # lambda = 900 to lambda = 1400, with 50000 pixels
        sp = AbsorptionSpectrum(900.0, 1400.0, 50000)

        # Iterate over species
        for s, params in species_dicts.items():
            # Iterate over transitions for a single species
            for i in range(params['numLines']):
                # Add the lines to the spectrum
                sp.add_line(
                    s, params['field'],
                    params['wavelength'][i], params['f'][i],
                    params['Gamma'][i], params['mass'],
                    label_threshold=1.e10)


        # Make and save spectrum
        wavelength, flux = sp.make_spectrum(
            'test_lightray.h5',
            output_file='test_spectrum.h5',
            line_list_file='test_lines.txt',
            use_peculiar_velocity=True)
Example #18
def test_fits_image():
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    fields = ("density", "temperature")
    units = (
        'g/cm**3',
        'K',
    )
    ds = fake_random_ds(64,
                        fields=fields,
                        units=units,
                        nprocs=16,
                        length_unit=100.0)

    prj = ds.proj("density", 2)
    prj_frb = prj.to_frb((0.5, "unitary"), 128)

    fid1 = FITSImageData(prj_frb,
                         fields=["density", "temperature"],
                         units="cm")
    fits_prj = FITSProjection(ds,
                              "z", [ds.fields.gas.density, "temperature"],
                              image_res=128,
                              width=(0.5, "unitary"))

    assert_equal(fid1["density"].data, fits_prj["density"].data)
    assert_equal(fid1["temperature"].data, fits_prj["temperature"].data)

    fid1.writeto("fid1.fits", overwrite=True)
    new_fid1 = FITSImageData.from_file("fid1.fits")

    assert_equal(fid1["density"].data, new_fid1["density"].data)
    assert_equal(fid1["temperature"].data, new_fid1["temperature"].data)

    ds2 = load("fid1.fits")
    ds2.index

    assert ("fits", "density") in ds2.field_list
    assert ("fits", "temperature") in ds2.field_list

    dw_cm = ds2.domain_width.in_units("cm")

    assert dw_cm[0].v == 50.
    assert dw_cm[1].v == 50.

    slc = ds.slice(2, 0.5)
    slc_frb = slc.to_frb((0.5, "unitary"), 128)

    fid2 = FITSImageData(slc_frb,
                         fields=["density", "temperature"],
                         units="cm")
    fits_slc = FITSSlice(ds,
                         "z", ["density", ("gas", "temperature")],
                         image_res=128,
                         width=(0.5, "unitary"))

    assert_equal(fid2["density"].data, fits_slc["density"].data)
    assert_equal(fid2["temperature"].data, fits_slc["temperature"].data)

    dens_img = fid2.pop("density")
    temp_img = fid2.pop("temperature")

    # This already has some assertions in it, so we don't need to do anything
    # with it other than just make one
    FITSImageData.from_images([dens_img, temp_img])

    cut = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])
    cut_frb = cut.to_frb((0.5, "unitary"), 128)

    fid3 = FITSImageData(cut_frb,
                         fields=[("gas", "density"),
                                 ds.fields.gas.temperature],
                         units="cm")
    fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9],
                                ["density", "temperature"],
                                image_res=128,
                                center=[0.5, 0.42, 0.6],
                                width=(0.5, "unitary"))

    assert_equal(fid3["density"].data, fits_cut["density"].data)
    assert_equal(fid3["temperature"].data, fits_cut["temperature"].data)

    fid3.create_sky_wcs([30., 45.], (1.0, "arcsec/kpc"))
    fid3.writeto("fid3.fits", overwrite=True)
    new_fid3 = FITSImageData.from_file("fid3.fits")
    assert_same_wcs(fid3.wcs, new_fid3.wcs)
    assert new_fid3.wcs.wcs.cunit[0] == "deg"
    assert new_fid3.wcs.wcs.cunit[1] == "deg"
    assert new_fid3.wcs.wcs.ctype[0] == "RA---TAN"
    assert new_fid3.wcs.wcs.ctype[1] == "DEC--TAN"

    buf = off_axis_projection(ds, ds.domain_center, [0.1, 0.2, -0.9], 0.5, 128,
                              "density").swapaxes(0, 1)
    fid4 = FITSImageData(buf, fields="density", width=100.0)
    fits_oap = FITSOffAxisProjection(ds, [0.1, 0.2, -0.9],
                                     "density",
                                     width=(0.5, "unitary"),
                                     image_res=128,
                                     depth=(0.5, "unitary"))

    assert_equal(fid4["density"].data, fits_oap["density"].data)

    fid4.create_sky_wcs([30., 45.], (1.0, "arcsec/kpc"), replace_old_wcs=False)
    assert fid4.wcs.wcs.cunit[0] == "cm"
    assert fid4.wcs.wcs.cunit[1] == "cm"
    assert fid4.wcs.wcs.ctype[0] == "linear"
    assert fid4.wcs.wcs.ctype[1] == "linear"
    assert fid4.wcsa.wcs.cunit[0] == "deg"
    assert fid4.wcsa.wcs.cunit[1] == "deg"
    assert fid4.wcsa.wcs.ctype[0] == "RA---TAN"
    assert fid4.wcsa.wcs.ctype[1] == "DEC--TAN"

    cvg = ds.covering_grid(ds.index.max_level, [0.25, 0.25, 0.25],
                           [32, 32, 32],
                           fields=["density", "temperature"])
    fid5 = FITSImageData(cvg, fields=["density", "temperature"])
    assert fid5.dimensionality == 3

    fid5.update_header("density", "time", 0.1)
    fid5.update_header("all", "units", "cgs")

    assert fid5["density"].header["time"] == 0.1
    assert fid5["temperature"].header["units"] == "cgs"
    assert fid5["density"].header["units"] == "cgs"

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #19
    def make_light_ray(self, seed=None,
                       start_position=None, end_position=None,
                       trajectory=None,
                       fields=None, setup_function=None,
                       solution_filename=None, data_filename=None,
                       get_los_velocity=True, redshift=None,
                       njobs=-1):
        """
        make_light_ray(seed=None, start_position=None, end_position=None,
                       trajectory=None, fields=None, setup_function=None,
                       solution_filename=None, data_filename=None,
                       get_los_velocity=True, redshift=None,
                       njobs=-1)

        Create a light ray and get field values for each lixel.  A light
        ray consists of a list of field values for cells intersected by
        the ray and the path length of the ray through those cells.
        Light ray data can be written out to an hdf5 file.

        Parameters
        ----------
        seed : optional, int
            Seed for the random number generator.
            Default: None.
        start_position : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The coordinates of the starting position of the ray.
            Default: None.
        end_position : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The coordinates of the ending position of the ray.
            Default: None.
        trajectory : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The (r, theta, phi) direction of the light ray.  Use either 
            end_position or trajectory, not both.
            Default: None.
        fields : optional, list
            A list of fields for which to get data.
            Default: None.
        setup_function : optional, callable, accepts a ds
            This function will be called on each dataset that is loaded 
            to create the light ray.  For, example, this can be used to 
            add new derived fields.
            Default: None.
        solution_filename : optional, string
            Path to a text file where the trajectories of each
            subray is written out.
            Default: None.
        data_filename : optional, string
            Path to output file for ray data.
            Default: None.
        get_los_velocity : optional, bool
            If True, the line of sight velocity is calculated for
            each point in the ray.
            Default: True.
        redshift : optional, float
            Used with light rays made from single datasets to specify a 
            starting redshift for the ray.  If not used, the starting 
            redshift will be 0 for a non-cosmological dataset and 
            the dataset redshift for a cosmological dataset.
            Default: None.
        njobs : optional, int
            The number of parallel jobs over which the segments will 
            be split.  Choose -1 for one processor per segment.
            Default: -1.

        Examples
        --------

        Make a light ray from multiple datasets:
        
        >>> import yt
        >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
        ...     LightRay
        >>> my_ray = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
        ...                   0., 0.1, time_data=False)
        ...
        >>> my_ray.make_light_ray(seed=12345,
        ...                       solution_filename="solution.txt",
        ...                       data_filename="my_ray.h5",
        ...                       fields=["temperature", "density"],
        ...                       get_los_velocity=True)

        Make a light ray from a single dataset:

        >>> import yt
        >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
        ...     LightRay
        >>> my_ray = LightRay("IsolatedGalaxy/galaxy0030/galaxy0030")
        ...
        >>> my_ray.make_light_ray(start_position=[0., 0., 0.],
        ...                       end_position=[1., 1., 1.],
        ...                       solution_filename="solution.txt",
        ...                       data_filename="my_ray.h5",
        ...                       fields=["temperature", "density"],
        ...                       get_los_velocity=True)
        
        """

        # Calculate solution.
        self._calculate_light_ray_solution(seed=seed, 
                                           start_position=start_position, 
                                           end_position=end_position,
                                           trajectory=trajectory,
                                           filename=solution_filename)

        # Initialize data structures.
        self._data = {}
        if fields is None: fields = []
        data_fields = fields[:]
        all_fields = fields[:]
        all_fields.extend(['dl', 'dredshift', 'redshift'])
        if get_los_velocity:
            all_fields.extend(['velocity_x', 'velocity_y',
                               'velocity_z', 'velocity_los'])
            data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])

        all_ray_storage = {}
        for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                       storage=all_ray_storage,
                                                       njobs=njobs):

            # Load dataset for segment.
            ds = load(my_segment['filename'], **self.load_kwargs)

            my_segment['unique_identifier'] = ds.unique_identifier
            if redshift is not None:
                if ds.cosmological_simulation and redshift != ds.current_redshift:
                    mylog.warn("Generating light ray with different redshift than " +
                               "the dataset itself.")
                my_segment["redshift"] = redshift

            if setup_function is not None:
                setup_function(ds)

            if start_position is not None:
                my_segment["start"] = ds.arr(my_segment["start"], "code_length")
                my_segment["end"] = ds.arr(my_segment["end"], "code_length")
            else:
                my_segment["start"] = ds.domain_width * my_segment["start"] + \
                  ds.domain_left_edge
                my_segment["end"] = ds.domain_width * my_segment["end"] + \
                  ds.domain_left_edge

            if not ds.cosmological_simulation:
                next_redshift = my_segment["redshift"]
            elif self.near_redshift == self.far_redshift:
                next_redshift = my_segment["redshift"] - \
                  self._deltaz_forward(my_segment["redshift"], 
                                       ds.domain_width[0].in_units("Mpccm / h") *
                                       my_segment["traversal_box_fraction"])
            elif my_segment.get("next", None) is None:
                next_redshift = self.near_redshift
            else:
                next_redshift = my_segment['next']['redshift']

            mylog.info("Getting segment at z = %s: %s to %s." %
                       (my_segment['redshift'], my_segment['start'],
                        my_segment['end']))

            # Break periodic ray into non-periodic segments.
            sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
                                        left=ds.domain_left_edge,
                                        right=ds.domain_right_edge)

            # Prepare data structure for subsegment.
            sub_data = {}
            sub_data['segment_redshift'] = my_segment['redshift']
            for field in all_fields:
                sub_data[field] = []

            # Get data for all subsegments in segment.
            for sub_segment in sub_segments:
                mylog.info("Getting subsegment: %s to %s." %
                           (list(sub_segment[0]), list(sub_segment[1])))
                sub_ray = ds.ray(sub_segment[0], sub_segment[1])
                asort = np.argsort(sub_ray["t"])
                sub_data['dl'].extend(sub_ray['dts'][asort] *
                                      vector_length(sub_ray.start_point,
                                                    sub_ray.end_point))
                for field in data_fields:
                    sub_data[field].extend(sub_ray[field][asort])

                if get_los_velocity:
                    line_of_sight = sub_segment[1] - sub_segment[0]
                    line_of_sight /= ((line_of_sight**2).sum())**0.5
                    sub_vel = ds.arr([sub_ray['velocity_x'],
                                      sub_ray['velocity_y'],
                                      sub_ray['velocity_z']])
                    sub_data['velocity_los'].extend((np.rollaxis(sub_vel, 1) *
                                                     line_of_sight).sum(axis=1)[asort])
                    del sub_vel

                sub_ray.clear_data()
                del sub_ray, asort

            for key in sub_data:
                sub_data[key] = ds.arr(sub_data[key]).in_cgs()

            # Get redshift for each lixel.  Assume linear relation between l and z.
            sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
                (sub_data['dl'] / vector_length(my_segment['start'], 
                                                my_segment['end']).in_cgs())
            sub_data['redshift'] = my_segment['redshift'] - \
              sub_data['dredshift'].cumsum() + sub_data['dredshift']

            # Remove empty lixels.
            sub_dl_nonzero = sub_data['dl'].nonzero()
            for field in all_fields:
                sub_data[field] = sub_data[field][sub_dl_nonzero]
            del sub_dl_nonzero

            # Add to storage.
            my_storage.result = sub_data

            del ds

        # Reconstruct ray data from parallel_objects storage.
        all_data = [my_data for my_data in all_ray_storage.values()]
        # This is now a list of segments where each one is a dictionary
        # with all the fields.
        all_data.sort(key=lambda a:a['segment_redshift'], reverse=True)
        # Flatten the list into a single dictionary containing fields
        # for the whole ray.
        all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])

        if data_filename is not None:
            self._write_light_ray(data_filename, all_data)

        self._data = all_data
        return all_data
Example #20
def test_fits_image():
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    fields = ("density", "temperature")
    units = ('g/cm**3', 'K',)
    ds = fake_random_ds(64, fields=fields, units=units, nprocs=16,
                        length_unit=100.0)

    prj = ds.proj("density", 2)
    prj_frb = prj.to_frb((0.5, "unitary"), 128)

    fid1 = FITSImageData(prj_frb, fields=["density","temperature"], units="cm")
    fits_prj = FITSProjection(ds, "z", ["density","temperature"], image_res=128,
                              width=(0.5,"unitary"))

    yield assert_equal, fid1.get_data("density"), fits_prj.get_data("density")
    yield assert_equal, fid1.get_data("temperature"), fits_prj.get_data("temperature")

    fid1.writeto("fid1.fits", clobber=True)
    new_fid1 = FITSImageData.from_file("fid1.fits")

    yield assert_equal, fid1.get_data("density"), new_fid1.get_data("density")
    yield assert_equal, fid1.get_data("temperature"), new_fid1.get_data("temperature")

    ds2 = load("fid1.fits")
    ds2.index

    assert ("fits","density") in ds2.field_list
    assert ("fits","temperature") in ds2.field_list

    dw_cm = ds2.domain_width.in_units("cm")

    assert dw_cm[0].v == 50.
    assert dw_cm[1].v == 50.

    slc = ds.slice(2, 0.5)
    slc_frb = slc.to_frb((0.5, "unitary"), 128)

    fid2 = FITSImageData(slc_frb, fields=["density","temperature"], units="cm")
    fits_slc = FITSSlice(ds, "z", ["density","temperature"], image_res=128,
                         width=(0.5,"unitary"))

    yield assert_equal, fid2.get_data("density"), fits_slc.get_data("density")
    yield assert_equal, fid2.get_data("temperature"), fits_slc.get_data("temperature")

    dens_img = fid2.pop("density")
    temp_img = fid2.pop("temperature")

    # This already has some assertions in it, so we don't need to do anything
    # with it other than just make one
    fid_comb = FITSImageData.from_images([dens_img, temp_img])

    cut = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])
    cut_frb = cut.to_frb((0.5, "unitary"), 128)

    fid3 = FITSImageData(cut_frb, fields=["density","temperature"], units="cm")
    fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9], ["density","temperature"],
                                image_res=128, center=[0.5, 0.42, 0.6],
                                width=(0.5,"unitary"))

    yield assert_equal, fid3.get_data("density"), fits_cut.get_data("density")
    yield assert_equal, fid3.get_data("temperature"), fits_cut.get_data("temperature")

    fid3.create_sky_wcs([30.,45.], (1.0,"arcsec/kpc"))
    fid3.writeto("fid3.fits", clobber=True)
    new_fid3 = FITSImageData.from_file("fid3.fits")
    assert_same_wcs(fid3.wcs, new_fid3.wcs)
    assert new_fid3.wcs.wcs.cunit[0] == "deg"
    assert new_fid3.wcs.wcs.cunit[1] == "deg"
    assert new_fid3.wcs.wcs.ctype[0] == "RA---TAN"
    assert new_fid3.wcs.wcs.ctype[1] == "DEC--TAN"

    buf = off_axis_projection(ds, ds.domain_center, [0.1, 0.2, -0.9],
                              0.5, 128, "density").swapaxes(0, 1)
    fid4 = FITSImageData(buf, fields="density", width=100.0)
    fits_oap = FITSOffAxisProjection(ds, [0.1, 0.2, -0.9], "density",
                                     width=(0.5,"unitary"), image_res=128,
                                     depth_res=128, depth=(0.5,"unitary"))

    yield assert_equal, fid4.get_data("density"), fits_oap.get_data("density")

    cvg = ds.covering_grid(ds.index.max_level, [0.25,0.25,0.25],
                           [32, 32, 32], fields=["density","temperature"])
    fid5 = FITSImageData(cvg, fields=["density","temperature"])
    assert fid5.dimensionality == 3

    fid5.update_header("density", "time", 0.1)
    fid5.update_header("all", "units", "cgs")

    assert fid5["density"].header["time"] == 0.1
    assert fid5["temperature"].header["units"] == "cgs"
    assert fid5["density"].header["units"] == "cgs"

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
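The test above exercises the FITSImageData round trip end to end. A minimal
standalone sketch of the same write/read pattern (assuming the yt 3.x API used
in this test, where writeto still accepts the older clobber keyword):

from yt.testing import fake_random_ds
from yt.visualization.fits_image import FITSImageData

# Build a small in-memory dataset with a single density field.
ds = fake_random_ds(64, fields=("density",), units=("g/cm**3",),
                    length_unit=100.0)

# Project density along z and render the projection into a FITS image.
frb = ds.proj("density", 2).to_frb((0.5, "unitary"), 128)
fid = FITSImageData(frb, fields=["density"], units="cm")

# Write to disk, read back, and confirm the data survives the round trip.
fid.writeto("roundtrip.fits", clobber=True)
fid2 = FITSImageData.from_file("roundtrip.fits")
assert (fid.get_data("density") == fid2.get_data("density")).all()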
Example #21
    def _calculate_light_ray_solution(self, seed=None,
                                      left_edge=None, right_edge=None,
                                      min_level=None, periodic=True,
                                      start_position=None, end_position=None,
                                      trajectory=None, filename=None):
        "Create list of datasets to be added together to make the light ray."

        # Calculate dataset sizes, and get random dataset axes and centers.
        my_random = np.random.RandomState(seed)

        # If using only one dataset, set start and stop manually.
        if start_position is not None:
            if self.near_redshift is not None or self.far_redshift is not None:
                raise RuntimeError("LightRay Error: cannot specify both " + \
                                   "start_position and a redshift range.")
            if not ((end_position is None) ^ (trajectory is None)):
                raise RuntimeError("LightRay Error: must specify either end_position " + \
                                   "or trajectory, but not both.")
            self.light_ray_solution[0]['start'] = start_position
            if end_position is not None:
                self.light_ray_solution[0]['end'] = end_position
            else:
                # assume trajectory given as r, theta, phi
                if len(trajectory) != 3:
                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
                r, theta, phi = trajectory
                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
                  r * np.array([np.cos(phi) * np.sin(theta),
                                np.sin(phi) * np.sin(theta),
                                np.cos(theta)])
            self.light_ray_solution[0]['traversal_box_fraction'] = \
              vector_length(self.light_ray_solution[0]['start'],
                            self.light_ray_solution[0]['end'])

        # the normal way (random start positions and trajectories for each dataset)
        else:

            # For box coherence, keep track of effective depth travelled.
            box_fraction_used = 0.0

            for q in range(len(self.light_ray_solution)):
                if (q == len(self.light_ray_solution) - 1):
                    z_next = self.near_redshift
                else:
                    z_next = self.light_ray_solution[q+1]['redshift']

                # Calculate fraction of box required for a depth of delta z
                self.light_ray_solution[q]['traversal_box_fraction'] = \
                    self.cosmology.comoving_radial_distance(z_next, \
                        self.light_ray_solution[q]['redshift']).in_units("Mpccm / h") / \
                        self.simulation.box_size

                # Get dataset axis and center.
                # If using box coherence, only get start point and vector if
                # enough of the box has been used.
                if (q == 0) or (box_fraction_used >=
                                self.minimum_coherent_box_fraction):
                    if periodic:
                        self.light_ray_solution[q]['start'] = left_edge + \
                          (right_edge - left_edge) * my_random.random_sample(3)
                        theta = np.pi * my_random.random_sample()
                        phi = 2 * np.pi * my_random.random_sample()
                        box_fraction_used = 0.0
                    else:
                        ds = load(self.light_ray_solution[q]["filename"])
                        ray_length = \
                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'],
                                  "unitary")
                        self.light_ray_solution[q]['start'], \
                          self.light_ray_solution[q]['end'] = \
                          non_periodic_ray(ds, left_edge, right_edge, ray_length,
                                           my_random=my_random, min_level=min_level)
                        del ds
                else:
                    # Use end point of previous segment, adjusted for periodicity,
                    # and the same trajectory.
                    self.light_ray_solution[q]['start'] = \
                      periodic_adjust(self.light_ray_solution[q-1]['end'][:],
                                      left=left_edge, right=right_edge)

                if "end" not in self.light_ray_solution[q]:
                    self.light_ray_solution[q]['end'] = \
                      self.light_ray_solution[q]['start'] + \
                        self.light_ray_solution[q]['traversal_box_fraction'] * \
                        self.simulation.box_size * \
                        np.array([np.cos(phi) * np.sin(theta),
                                  np.sin(phi) * np.sin(theta),
                                  np.cos(theta)])
                box_fraction_used += \
                  self.light_ray_solution[q]['traversal_box_fraction']

        if filename is not None:
            self._write_light_ray_solution(filename,
                extra_info={'parameter_filename':self.parameter_filename,
                            'random_seed':seed,
                            'far_redshift':self.far_redshift,
                            'near_redshift':self.near_redshift})
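The trajectory branch above converts an (r, theta, phi) direction into a
Cartesian end point. A self-contained sketch of that conversion with
hypothetical values, using the same unit vector as the method:

import numpy as np

start = np.array([0.1, 0.1, 0.1])          # hypothetical start point
r, theta, phi = 0.5, np.pi / 3, np.pi / 4  # hypothetical trajectory

# Spherical-to-Cartesian unit vector, as in _calculate_light_ray_solution.
direction = np.array([np.cos(phi) * np.sin(theta),
                      np.sin(phi) * np.sin(theta),
                      np.cos(theta)])
end = start + r * direction  # the ray ends a distance r from start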
Example #22
    def __init__(self, parameter_filename, simulation_type=None,
                 near_redshift=None, far_redshift=None,
                 use_minimum_datasets=True, max_box_fraction=1.0,
                 deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
                 time_data=True, redshift_data=True,
                 find_outputs=False, load_kwargs=None):

        if near_redshift is not None and far_redshift is not None and \
          near_redshift >= far_redshift:
            raise RuntimeError(
                "near_redshift must be less than far_redshift.")

        self.near_redshift = near_redshift
        self.far_redshift = far_redshift
        self.use_minimum_datasets = use_minimum_datasets
        self.deltaz_min = deltaz_min
        self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
        self.parameter_filename = parameter_filename
        if load_kwargs is None:
            self.load_kwargs = {}
        else:
            self.load_kwargs = load_kwargs
        self.light_ray_solution = []
        self._data = {}

        # The options here are:
        # 1) User passed us a dataset: use it to make a simple ray
        # 2) User passed us a dataset filename: use it to make a simple ray
        # 3) User passed us a simulation filename: use it to make a compound ray

        # Make a light ray from a single, given dataset: #1, #2
        if simulation_type is None:     
            self.simulation_type = simulation_type
            if isinstance(self.parameter_filename, Dataset):
                self.ds = self.parameter_filename
                self.parameter_filename = self.ds.basename
            elif isinstance(self.parameter_filename, str):
                self.ds = load(self.parameter_filename, **self.load_kwargs)
            if self.ds.cosmological_simulation:
                redshift = self.ds.current_redshift
                self.cosmology = Cosmology(
                    hubble_constant=self.ds.hubble_constant,
                    omega_matter=self.ds.omega_matter,
                    omega_lambda=self.ds.omega_lambda)
            else:
                redshift = 0.
            self.light_ray_solution.append({"filename": self.parameter_filename,
                                            "redshift": redshift})

        # Make a light ray from a simulation time-series. #3
        else:
            self.ds = None
            assert isinstance(self.parameter_filename, str)
            # Get list of datasets for light ray solution.
            CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                     find_outputs=find_outputs)
            self.light_ray_solution = \
              self.create_cosmology_splice(
                  self.near_redshift, self.far_redshift,
                  minimal=self.use_minimum_datasets,
                  max_box_fraction=max_box_fraction,
                  deltaz_min=self.deltaz_min,
                  time_data=time_data,
                  redshift_data=redshift_data)
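As the comments in the constructor note, it accepts a loaded dataset, a
dataset filename, or a simulation parameter file. A sketch of the three entry
points, using the sample dataset paths that appear elsewhere in these examples
(the import path matches the yt 3.x analysis modules):

import yt
from yt.analysis_modules.cosmological_observation.light_ray.api import LightRay

# 1) From an already-loaded dataset (simple ray).
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
lr1 = LightRay(ds)

# 2) From a dataset filename (also a simple ray).
lr2 = LightRay("IsolatedGalaxy/galaxy0030/galaxy0030")

# 3) From a simulation parameter file (compound ray spanning z = 0 to 0.1).
lr3 = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", simulation_type="Enzo",
               near_redshift=0.0, far_redshift=0.1)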
Example #23
def load_sample(name=None, specific_file=None, pbar=True):
    """
    Load sample data with yt. A simple wrapper around yt.load that also
    fetches the data with pooch.

    Parameters
    ----------
    name : str or None
        The name of the sample data to load. This is generally the name of the
        folder of the dataset. For IsolatedGalaxy, the name would be
        `IsolatedGalaxy`.  If `None` is supplied, the return value
        will be a list of all known datasets (by name).

    specific_file : str, optional
        The name of a specific file to load from within the sample dataset
        `name`. For the dataset `enzo_cosmology_plus`, which has a number of
        timesteps available, one may wish to choose DD0003. That file lives at
        `enzo_cosmology_plus/DD0003/DD0003`, so the argument passed here would
        be `DD0003/DD0003`.

    pbar : bool
        If True, display a progress bar while downloading.

    """
    fido = sd.Fido()
    if name is None:
        keys = []
        for key in fido._registry:
            for ext in sd._extensions_to_strip:
                if key.endswith(ext):
                    key = key[:-len(ext)]
            keys.append(key)
        return keys

    base_path = fido.fido.path
    fileext, name, extension = _validate_sampledata_name(name)

    downloader = None
    if pbar:
        try:
            import tqdm  # noqa: F401

            downloader = pch.pooch.HTTPDownloader(progressbar=True)
        except ImportError:
            mylog.warning(
                "tqdm is not installed, progress bar can not be displayed.")

    if extension == "h5":
        fname = fetch_noncompressed_file(fileext, fido, downloader=downloader)
    else:
        # Assume most files on the hub are compressed as .tar archives;
        # some may not be.
        fname = fetch_compressed_file(fileext, fido, downloader=downloader)

    # The `folder_path` variable is used here to notify the user where the
    # files have been unpacked to. However, we can't assume this is reliable
    # because in some cases the common path will overlap with the `load_name`
    # variable of the file.
    folder_path = os.path.commonprefix(fname)
    mylog.info("Files located at %s", folder_path)

    # Location of the file to load automatically, registered in the Fido class
    info = fido[fileext]
    file_lookup = info["load_name"]
    optional_args = info["load_kwargs"]

    if specific_file is None:
        # Right now only untarred files are handled; h5 support comes later.
        mylog.info("Default to loading %s for %s dataset", file_lookup, name)
        loaded_file = os.path.join(base_path, f"{fileext}.untar", name,
                                   file_lookup)
    else:
        mylog.info("Loading %s for %s dataset", specific_file, name)
        loaded_file = os.path.join(base_path, f"{fileext}.untar", name,
                                   specific_file)

    return load(loaded_file, **optional_args)
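A minimal usage sketch of load_sample, with the dataset names taken from its
own docstring (fetching requires network access on first use):

# List all known sample datasets by name.
names = load_sample()

# Fetch (if needed) and load the default file of a sample dataset.
ds = load_sample("IsolatedGalaxy")

# Load a specific timestep from a multi-output sample dataset.
ds = load_sample("enzo_cosmology_plus", specific_file="DD0003/DD0003")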
Example #24
def test_contour_callback():
    with _cleanup_fname() as prefix:
        ds = fake_amr_ds(fields=("density", "temperature"))
        for ax in 'xyz':
            p = ProjectionPlot(ds, ax, "density")
            p.annotate_contour("temperature")
            assert_fname(p.save(prefix)[0])
            p = ProjectionPlot(ds, ax, "density", weight_field="density")
            p.annotate_contour("temperature")
            assert_fname(p.save(prefix)[0])
            p = SlicePlot(ds, ax, "density")
            p.annotate_contour("temperature")  # BREAKS WITH ndarray
            assert_fname(p.save(prefix)[0])
        # Now we'll check a few additional minor things
        p = SlicePlot(ds, "x", "density")
        p.annotate_contour("temperature",
                           ncont=10,
                           factor=8,
                           take_log=False,
                           clim=(0.4, 0.6),
                           plot_args={'lw': 2.0},
                           label=True,
                           text_args={'text-size': 'x-large'})
        p.save(prefix)

        p = SlicePlot(ds, "x", "density")
        s2 = ds.slice(0, 0.2)
        p.annotate_contour("temperature",
                           ncont=10,
                           factor=8,
                           take_log=False,
                           clim=(0.4, 0.6),
                           plot_args={'lw': 2.0},
                           label=True,
                           text_args={'text-size': 'x-large'},
                           data_source=s2)
        p.save(prefix)

    with _cleanup_fname() as prefix:
        ds = load(cyl_2d)
        slc = SlicePlot(ds, "theta", "plasma_beta")
        slc.annotate_contour("plasma_beta",
                             ncont=2,
                             factor=7.,
                             take_log=False,
                             clim=(1.e-1, 1.e1),
                             label=True,
                             plot_args={
                                 "colors": ("c", "w"),
                                 "linewidths": 1
                             },
                             text_args={"fmt": "%1.1f"})
        assert_fname(slc.save(prefix)[0])

    with _cleanup_fname() as prefix:
        ds = fake_amr_ds(fields=("density", "temperature"),
                         geometry="spherical")
        p = SlicePlot(ds, "r", "density")
        p.annotate_contour("temperature",
                           ncont=10,
                           factor=8,
                           take_log=False,
                           clim=(0.4, 0.6),
                           plot_args={'lw': 2.0},
                           label=True,
                           text_args={'text-size': 'x-large'})
        assert_raises(YTDataTypeUnsupported, p.save, prefix)
Example #25
    def make_light_ray(self, seed=None, periodic=True,
                       left_edge=None, right_edge=None, min_level=None,
                       start_position=None, end_position=None,
                       trajectory=None,
                       fields=None, setup_function=None,
                       solution_filename=None, data_filename=None,
                       get_los_velocity=None, use_peculiar_velocity=True,
                       redshift=None, field_parameters=None, njobs=-1):
        """
        make_light_ray(seed=None, periodic=True,
                       left_edge=None, right_edge=None, min_level=None,
                       start_position=None, end_position=None,
                       trajectory=None, fields=None, setup_function=None,
                       solution_filename=None, data_filename=None,
                       use_peculiar_velocity=True, redshift=None,
                       njobs=-1)

        Create a light ray and get field values for each lixel.  A light
        ray consists of a list of field values for cells intersected by
        the ray and the path length of the ray through those cells.
        Light ray data must be written out to an hdf5 file.

        Parameters
        ----------
        seed : optional, int
            Seed for the random number generator.
            Default: None.
        periodic : optional, bool
            If True, ray trajectories will make use of periodic
            boundaries.  If False, ray trajectories will not be
            periodic.
            Default : True.
        left_edge : optional, iterable of floats or YTArray
            The left corner of the region in which rays are to be
            generated.  If None, the left edge will be that of the
            domain.  If specified without units, it is assumed to
            be in code units.
            Default: None.
        right_edge : optional, iterable of floats or YTArray
            The right corner of the region in which rays are to be
            generated.  If None, the right edge will be that of the
            domain.  If specified without units, it is assumed to
            be in code units.
            Default: None.
        min_level : optional, int
            The minimum refinement level of the spatial region in which
            the ray passes.  This can be used with zoom-in simulations
            where the high resolution region does not keep a constant
            geometry.
            Default: None.
        start_position : optional, iterable of floats or YTArray.
            Used only if creating a light ray from a single dataset.
            The coordinates of the starting position of the ray.
            If specified without units, it is assumed to be in code units.
            Default: None.
        end_position : optional, iterable of floats or YTArray.
            Used only if creating a light ray from a single dataset.
            The coordinates of the ending position of the ray.
            If specified without units, it is assumed to be in code units.
            Default: None.
        trajectory : optional, list of floats
            Used only if creating a light ray from a single dataset.
            The (r, theta, phi) direction of the light ray.  Use either
            end_position or trajectory, not both.
            Default: None.
        fields : optional, list
            A list of fields for which to get data.
            Default: None.
        setup_function : optional, callable, accepts a ds
            This function will be called on each dataset that is loaded
            to create the light ray.  For, example, this can be used to
            add new derived fields.
            Default: None.
        solution_filename : optional, string
            Path to a text file where the trajectory of each
            subray is written out.
            Default: None.
        data_filename : optional, string
            Path to output file for ray data.
            Default: None.
        use_peculiar_velocity : optional, bool
            If True, the peculiar velocity along the ray will be sampled for
            calculating the effective redshift combining the cosmological
            redshift and the doppler redshift.
            Default: True.
        redshift : optional, float
            Used with light rays made from single datasets to specify a
            starting redshift for the ray.  If not used, the starting
            redshift will be 0 for a non-cosmological dataset and
            the dataset redshift for a cosmological dataset.
            Default: None.
        njobs : optional, int
            The number of parallel jobs over which the segments will
            be split.  Choose -1 for one processor per segment.
            Default: -1.

        Examples
        --------

        Make a light ray from multiple datasets:

        >>> import yt
        >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
        ...     LightRay
        >>> my_ray = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
        ...                   0., 0.1, time_data=False)
        ...
        >>> my_ray.make_light_ray(seed=12345,
        ...                       solution_filename="solution.txt",
        ...                       data_filename="my_ray.h5",
        ...                       fields=["temperature", "density"],
        ...                       use_peculiar_velocity=True)

        Make a light ray from a single dataset:

        >>> import yt
        >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
        ...     LightRay
        >>> my_ray = LightRay("IsolatedGalaxy/galaxy0030/galaxy0030")
        ...
        >>> my_ray.make_light_ray(start_position=[0., 0., 0.],
        ...                       end_position=[1., 1., 1.],
        ...                       solution_filename="solution.txt",
        ...                       data_filename="my_ray.h5",
        ...                       fields=["temperature", "density"],
        ...                       use_peculiar_velocity=True)

        """
        if self.simulation_type is None:
            domain = self.ds
        else:
            domain = self.simulation

        assumed_units = "code_length"
        if left_edge is None:
            left_edge = domain.domain_left_edge
        elif not hasattr(left_edge, 'units'):
            left_edge = domain.arr(left_edge, assumed_units)
        left_edge.convert_to_units('unitary')

        if right_edge is None:
            right_edge = domain.domain_right_edge
        elif not hasattr(right_edge, 'units'):
            right_edge = domain.arr(right_edge, assumed_units)
        right_edge.convert_to_units('unitary')

        if start_position is not None:
            if hasattr(start_position, 'units'):
                start_position = start_position
            else:
                start_position = self.ds.arr(start_position, assumed_units)
            start_position.convert_to_units('unitary')

        if end_position is not None:
            if hasattr(end_position, 'units'):
                end_position = end_position
            else:
                end_position = self.ds.arr(end_position, assumed_units)
            end_position.convert_to_units('unitary')

        if get_los_velocity is not None:
            use_peculiar_velocity = get_los_velocity
            mylog.warn("'get_los_velocity' kwarg is deprecated. " + \
                       "Use 'use_peculiar_velocity' instead.")

        # Calculate solution.
        self._calculate_light_ray_solution(seed=seed,
                                           left_edge=left_edge,
                                           right_edge=right_edge,
                                           min_level=min_level, periodic=periodic,
                                           start_position=start_position,
                                           end_position=end_position,
                                           trajectory=trajectory,
                                           filename=solution_filename)

        if field_parameters is None:
            field_parameters = {}

        # Initialize data structures.
        self._data = {}
        # temperature field is automatically added to fields
        if fields is None:
            fields = []
        if (('gas', 'temperature') not in fields) and \
           ('temperature' not in fields):
            fields.append(('gas', 'temperature'))
        data_fields = fields[:]
        all_fields = fields[:]
        all_fields.extend(['dl', 'dredshift', 'redshift'])
        all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
        data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
        if use_peculiar_velocity:
            all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
                               'velocity_los', 'redshift_eff', 
                               'redshift_dopp'])
            data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])

        all_ray_storage = {}
        for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                       storage=all_ray_storage,
                                                       njobs=njobs):

            # In case of simple rays, use the already loaded dataset: self.ds, 
            # otherwise, load dataset for segment.
            if self.ds is None:
                ds = load(my_segment['filename'], **self.load_kwargs)
            else:
                ds = self.ds

            my_segment['unique_identifier'] = ds.unique_identifier
            if redshift is not None:
                if ds.cosmological_simulation and redshift != ds.current_redshift:
                    mylog.warn("Generating light ray with different redshift than " +
                               "the dataset itself.")
                my_segment["redshift"] = redshift

            if setup_function is not None:
                setup_function(ds)

            if not ds.cosmological_simulation:
                next_redshift = my_segment["redshift"]
            elif self.near_redshift == self.far_redshift:
                if isinstance(my_segment["traversal_box_fraction"], YTArray) and \
                  not my_segment["traversal_box_fraction"].units.is_dimensionless:
                    segment_length = \
                      my_segment["traversal_box_fraction"].in_units("Mpccm / h")
                else:
                    segment_length = my_segment["traversal_box_fraction"] * \
                      ds.domain_width[0].in_units("Mpccm / h")
                next_redshift = my_segment["redshift"] - \
                  self._deltaz_forward(my_segment["redshift"],
                                       segment_length)
            elif my_segment.get("next", None) is None:
                next_redshift = self.near_redshift
            else:
                next_redshift = my_segment['next']['redshift']

            # Make sure start, end, left, right
            # are using the dataset's unit system.
            my_start = ds.arr(my_segment['start'])
            my_end   = ds.arr(my_segment['end'])
            my_left  = ds.arr(left_edge)
            my_right = ds.arr(right_edge)
            mylog.info("Getting segment at z = %s: %s to %s." %
                       (my_segment['redshift'], my_start, my_end))

            # Break periodic ray into non-periodic segments.
            sub_segments = periodic_ray(my_start, my_end,
                                        left=my_left, right=my_right)

            # Prepare data structure for subsegment.
            sub_data = {}
            sub_data['segment_redshift'] = my_segment['redshift']
            for field in all_fields:
                sub_data[field] = []

            # Get data for all subsegments in segment.
            for sub_segment in sub_segments:
                mylog.info("Getting subsegment: %s to %s." %
                           (list(sub_segment[0]), list(sub_segment[1])))
                sub_ray = ds.ray(sub_segment[0], sub_segment[1])
                for key, val in field_parameters.items():
                    sub_ray.set_field_parameter(key, val)
                asort = np.argsort(sub_ray["t"])
                sub_data['dl'].extend(sub_ray['dts'][asort] *
                                      vector_length(sub_ray.start_point,
                                                    sub_ray.end_point))

                for field in data_fields:
                    sub_data[field].extend(sub_ray[field][asort])

                if use_peculiar_velocity:
                    line_of_sight = sub_segment[0] - sub_segment[1]
                    line_of_sight /= ((line_of_sight**2).sum())**0.5
                    sub_vel = ds.arr([sub_ray['velocity_x'],
                                      sub_ray['velocity_y'],
                                      sub_ray['velocity_z']])
                    # Line of sight velocity = vel_los
                    sub_vel_los = (np.rollaxis(sub_vel, 1) * \
                                   line_of_sight).sum(axis=1)
                    sub_data['velocity_los'].extend(sub_vel_los[asort])

                    # doppler redshift:
                    # See https://en.wikipedia.org/wiki/Redshift and 
                    # Peebles eqns: 5.48, 5.49

                    # 1 + redshift_dopp = (1 + v*cos(theta)/c) / 
                    # sqrt(1 - v**2/c**2)

                    # where v is the peculiar velocity (ie physical velocity
                    # without the hubble flow, but no hubble flow in sim, so
                    # just the physical velocity).

                    # the bulk of the doppler redshift is from line of sight 
                    # motion, but there is a small amount from time dilation 
                    # of transverse motion, hence the inclusion of theta (the 
                    # angle between line of sight and the velocity). 
                    # theta is the angle between the ray vector (i.e. line of 
                    # sight) and the velocity vectors: a dot b = ab cos(theta)

                    sub_vel_mag = sub_ray['velocity_magnitude']
                    cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag
                    # Protect against situations where the velocity magnitude
                    # is exactly zero, in which case zero / zero = NaN.
                    cos_theta = np.nan_to_num(cos_theta)
                    redshift_dopp = \
                        (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \
                         np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1
                    sub_data['redshift_dopp'].extend(redshift_dopp[asort])
                    del sub_vel, sub_vel_los, sub_vel_mag, cos_theta, \
                        redshift_dopp

                sub_ray.clear_data()
                del sub_ray, asort

            for key in sub_data:
                sub_data[key] = ds.arr(sub_data[key]).in_cgs()

            # Get redshift for each lixel.  Assume linear relation between l 
            # and z.
            sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
                (sub_data['dl'] / vector_length(my_start, my_end).in_cgs())
            sub_data['redshift'] = my_segment['redshift'] - \
              sub_data['dredshift'].cumsum() + sub_data['dredshift']

            # When using the peculiar velocity, create effective redshift 
            # (redshift_eff) field combining cosmological redshift and 
            # doppler redshift.
            
            # then to add cosmological redshift and doppler redshifts, follow
            # eqn 3.75 in Peacock's Cosmological Physics:
            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)

            if use_peculiar_velocity:
               sub_data['redshift_eff'] = ((1 + sub_data['redshift_dopp']) * \
                                            (1 + sub_data['redshift'])) - 1

            # Remove empty lixels.
            sub_dl_nonzero = sub_data['dl'].nonzero()
            for field in all_fields:
                sub_data[field] = sub_data[field][sub_dl_nonzero]
            del sub_dl_nonzero

            # Add to storage.
            my_storage.result = sub_data

            del ds

        # Reconstruct ray data from parallel_objects storage.
        all_data = [my_data for my_data in all_ray_storage.values()]
        # This is now a list of segments where each one is a dictionary
        # with all the fields.
        all_data.sort(key=lambda a:a['segment_redshift'], reverse=True)
        # Flatten the list into a single dictionary containing fields
        # for the whole ray.
        all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])
        self._data = all_data

        if data_filename is not None:
            self._write_light_ray(data_filename, all_data)
            ray_ds = load(data_filename)
            return ray_ds
        else:
            return None
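The doppler and effective redshift comments in the loop above compress into a
few lines of NumPy. A standalone numeric sketch with hypothetical peculiar
velocities, not part of the LightRay API:

import numpy as np

c = 2.99792458e10                        # speed of light, cm/s
v = np.array([1.0e7, 5.0e7, 1.0e8])      # hypothetical peculiar velocities, cm/s
cos_theta = np.array([1.0, 0.5, -0.2])   # cos of angle between LOS and velocity

# 1 + z_dopp = (1 + v*cos(theta)/c) / sqrt(1 - v**2/c**2)  (Peebles 5.48-5.49)
z_dopp = (1 + v * cos_theta / c) / np.sqrt(1 - v**2 / c**2) - 1

# 1 + z_eff = (1 + z_cosmo) * (1 + z_dopp)  (Peacock eqn 3.75)
z_cosmo = 0.05
z_eff = (1 + z_cosmo) * (1 + z_dopp) - 1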
Example #26
    def make_spectrum(self,
                      ray,
                      lines='all',
                      output_file=None,
                      output_absorbers_file=None,
                      use_peculiar_velocity=True,
                      observing_redshift=0.0,
                      ly_continuum=True,
                      store_observables=False,
                      min_tau=1e-3,
                      njobs="auto"):
        """
        Make a spectrum from ray data, depositing the desired lines.  Pass
        this function a LightRay object and, optionally, a list of strings
        naming the lines you would like deposited in the final spectrum.

        **Parameters**

        :ray: string, dataset, or data container

            If a string, the path to the ray dataset. As a dataset,
            this is the ray dataset loaded by yt. As a data container,
            this is a data object created from a ray dataset, such as
            a cut region.

        :lines: list of strings

            List of strings that determine which lines will be added
            to the spectrum.  List can include things like "C", "O VI",
            or "Mg II ####", where #### would be the integer wavelength
            value of the desired line.  If set to 'all', includes all lines
            in LineDatabase set in SpectrumGenerator.
            Default: 'all'

        :output_file: optional, string

            Filename of output if you wish to save the spectrum immediately
            without any further processing. File formats are chosen based on the
            filename extension.  ".h5" for HDF5, ".fits" for FITS,
            and everything else is ASCII.  Equivalent of calling
            :class:`~trident.SpectrumGenerator.save_spectrum`.
            Default: None

        :output_absorbers_file: optional, string

           Option to save a text file containing all of the absorbers and
           corresponding wavelength and redshift information.
           For parallel jobs, combining the lines lists can be slow so it
           is recommended to set to None in such circumstances.
           Default: None

        :use_peculiar_velocity: optional, bool

            If True, include the effects of doppler redshift of the gas
            in shifting lines in the final spectrum.
            Default: True

        :observing_redshift: optional, float

            This is the value of the redshift at which the observer of this
            spectrum exists.  In most cases, this will be a redshift of 0.
            Default: 0.

        :ly_continuum: optional, boolean

            If any H I lines are used in the line list, this ensures a
            Lyman continuum will be included in the spectrum generation.
            The Lyman continuum begins at the final Lyman line deposited
            (Ly 39 = 912.32 A), not at the formal Lyman limit (911.76 A),
            so as not to leave a gap between the final Lyman lines and the
            continuum.  A power law of index 3 is used, normalized to match
            the opacity of the final Lyman lines.
            Default: True

        :store_observables: optional, boolean

            If set to true, observable properties for each cell in the light
            ray will be saved for each line in the line list. Properties
            include the column density, tau, thermal b, and the wavelength
            where tau was deposited. Best applied for a reasonable number
            of lines.  These quantities will be saved to the SpectrumGenerator
            attribute: 'line_observables_dict'.
            Default: False

        :min_tau: optional, float
           This value determines the size of the wavelength window used to
           deposit lines or continua.  The wavelength window is expanded
           until the optical depth at the edge is below this value.  If too
           high, this will result in features appearing cut off at the edges.
           Decreasing this will make features smoother but will also increase
           run time.  An increase by a factor of ten will result in roughly a
           2x slow down.
           Default: 1e-3.

        :njobs: optional, int or "auto"

            The number of process groups into which the loop over
            absorption lines will be divided.  If set to -1, each
            absorption line will be deposited by exactly one processor.
            If njobs is set to a value less than the total number of
            available processors (N), then the deposition of an
            individual line will be parallelized over (N / njobs)
            processors.  If set to "auto", it will first try to
            parallelize over the list of lines and only parallelize
            the line deposition if there are more processors than
            lines.  This is the optimal strategy for parallelizing
            spectrum generation.
            Default: "auto"

        **Example**

        Make a one zone ray and generate a COS spectrum for it including
        only Oxygen VI, Mg II, and all Carbon lines, and plot to disk.

        >>> import trident
        >>> ray = trident.make_onezone_ray()
        >>> sg = trident.SpectrumGenerator('COS')
        >>> sg.make_spectrum(ray, lines=['O VI', 'Mg II', 'C'])
        >>> sg.plot_spectrum('spec_raw.png')
        """
        self.observing_redshift = observing_redshift

        if isinstance(ray, str):
            ray = load(ray)
        if isinstance(ray, Dataset):
            ad = ray.all_data()
        elif isinstance(ray, YTDataContainer):
            ad = ray
            ray = ad.ds
        else:
            raise RuntimeError("Unrecognized ray type.")

        # Clear out any previous spectrum that existed first
        self.clear_spectrum()

        active_lines = self.line_database.parse_subset(lines)

        # Make sure we've produced all the necessary
        # derived fields if they aren't native to the data
        for line in active_lines:
            # if successful, means line.field is in ds.derived_field_list
            try:
                ad._determine_fields(line.field)[0]
            # otherwise we probably need to add the field to the dataset
            except BaseException:
                my_ion = \
                  line.field[:line.field.find("number_density")]
                on_ion = my_ion.split("_")
                # Add the field if greater than level 1 ionization
                # because there is only one naming convention for these fields:
                # X_pY_number_density
                if on_ion[1]:
                    my_lev = int(on_ion[1][1:]) + 1
                    mylog.info("Creating %s from ray's density, "
                               "temperature, metallicity." % (line.field))
                    add_ion_number_density_field(
                        on_ion[0],
                        my_lev,
                        ray,
                        ionization_table=self.ionization_table)
                # If level 1 ionization, check to see if other name for
                # field is present in dataset
                else:
                    my_lev = 1
                    alias_field = ('gas',
                                   "".join([my_ion, 'p0_number_density']))
                    # Don't add the X_number_density if X_p0_number_density is
                    # in dataset already
                    if alias_field in ray.derived_field_list:
                        line.field = alias_field
                    # But add the field if neither X_number_density nor
                    # X_p0_number_density is in the dataset
                    else:
                        mylog.info("Creating %s from ray's density, "
                                   "temperature, metallicity." % (line.field))
                        add_ion_number_density_field(
                            on_ion[0],
                            my_lev,
                            ray,
                            ionization_table=self.ionization_table)

            self.add_line(line.identifier,
                          line.field,
                          float(line.wavelength),
                          float(line.f_value),
                          float(line.gamma),
                          atomic_mass[line.element],
                          label_threshold=1e3)

        # If there are H I lines present, add a Lyman continuum source
        # Lyman continuum source starts at wavelength where last Lyman line
        # is deposited (Ly 40), as opposed to true Lyman Limit at 911.763 A
        # so there won't be a gap between lines and continuum.  Using
        # power law of index 3.0 and normalization to match the opacity of
        # the final Lyman line into the FUV.
        H_lines = self.line_database.select_lines(source_list=active_lines,
                                                  element='H',
                                                  ion_state='I')
        if len(H_lines) > 0 and ly_continuum:
            self.add_continuum('Ly C', H_lines[0].field, 912.32336, 1.6e17,
                               3.0)

        AbsorptionSpectrum.make_spectrum(
            self,
            ad,
            output_file=None,
            line_list_file=None,
            output_absorbers_file=output_absorbers_file,
            use_peculiar_velocity=use_peculiar_velocity,
            observing_redshift=observing_redshift,
            store_observables=store_observables,
            min_tau=min_tau,
            njobs=njobs)
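The field-adding logic above keys off the X_pY_number_density naming
convention, where X is the element symbol and Y + 1 is the ionization level.
A hypothetical standalone parser illustrating the same string handling:

def parse_ion_field(field):
    # "O_p5_number_density" -> prefix "O_p5_"
    prefix = field[:field.find("number_density")]
    element, level_token = prefix.split("_")[:2]
    if level_token:                  # "p5" -> ionization level 6 (O VI)
        return element, int(level_token[1:]) + 1
    return element, 1                # bare "X_number_density" -> level 1

print(parse_ion_field("O_p5_number_density"))  # ('O', 6)
print(parse_ion_field("H_p0_number_density"))  # ('H', 1)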
Example #27
    def project_light_cone(self,
                           field_of_view,
                           image_resolution,
                           field,
                           weight_field=None,
                           photon_field=False,
                           save_stack=True,
                           save_final_image=True,
                           save_slice_images=False,
                           cmap_name=None,
                           njobs=1,
                           dynamic=False):
        r"""Create projections for light cone, then add them together.

        Parameters
        ----------
        field_of_view : YTQuantity or tuple of (float, str)
            The field of view of the image and the units.
        image_resolution : YTQuantity or tuple of (float, str)
            The size of each image pixel and the units.
        field : string
            The projected field.
        weight_field : string
            the weight field of the projection.  This has the same meaning as
            in standard projections.
            Default: None.
        photon_field : bool
            if True, the projection data for each slice is decremented by
            4 Pi R^2, where R is the luminosity distance between the observer
            and the slice redshift.
            Default: False.
        save_stack : bool
            if True, the light cone data including each individual
            slice is written to an hdf5 file.
            Default: True.
        save_final_image : bool
            if True, save an image of the final light cone projection.
            Default: True.
        save_slice_images : bool
            save images for each individual projection slice.
            Default: False.
        cmap_name : string
            color map for images.
            Default: your default colormap.
        njobs : int
            The number of parallel jobs over which the light cone projection
            will be split.  Choose -1 for one processor per individual
            projection and 1 to have all processors work together on each
            projection.
            Default: 1.
        dynamic : bool
            If True, use dynamic load balancing to create the projections.
            Default: False.

        """

        if cmap_name is None:
            cmap_name = ytcfg.get("yt", "default_colormap")

        if isinstance(field_of_view, tuple) and len(field_of_view) == 2:
            field_of_view = self.simulation.quan(field_of_view[0],
                                                 field_of_view[1])
        elif not isinstance(field_of_view, YTArray):
            raise RuntimeError(
                "field_of_view argument must be either a YTQauntity " +
                "or a tuple of type (float, str).")
        if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
            image_resolution = self.simulation.quan(image_resolution[0],
                                                    image_resolution[1])
        elif not isinstance(image_resolution, YTArray):
            raise RuntimeError(
                "image_resolution argument must be either a YTQauntity " +
                "or a tuple of type (float, str).")

        # Calculate number of pixels on a side.
        pixels = int((field_of_view / image_resolution).in_units(""))

        # Clear projection stack.
        projection_stack = []
        projection_weight_stack = []
        if "object" in self.light_cone_solution[-1]:
            del self.light_cone_solution[-1]["object"]

        # for q, output in enumerate(self.light_cone_solution):
        all_storage = {}
        for my_storage, output in parallel_objects(self.light_cone_solution,
                                                   storage=all_storage,
                                                   dynamic=dynamic):
            output["object"] = load(output["filename"])
            output["object"].parameters.update(self.set_parameters)

            # Calculate fraction of box required for width corresponding to
            # requested image size.
            proper_box_size = self.simulation.box_size / \
              (1.0 + output["redshift"])
            output["box_width_fraction"] = (output["box_width_per_angle"] *
                                            field_of_view).in_units("")

            frb = _light_cone_projection(output,
                                         field,
                                         pixels,
                                         weight_field=weight_field)

            if photon_field:
                # Decrement the flux by the luminosity distance.
                # Assume field in frb is in erg/s/cm^2/Hz
                dL = self.cosmology.luminosity_distance(
                    self.observer_redshift, output["redshift"])
                proper_box_size = self.simulation.box_size / \
                  (1.0 + output["redshift"])
                pixel_area = (proper_box_size.in_cgs() /
                              pixels)**2  #in proper cm^2
                factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                mylog.info("Distance to slice = %s" % dL)
                frb[field] *= factor  # in erg/s/cm^2/Hz on the observer's image plane.

            if weight_field is None:
                my_storage.result = {"field": frb[field]}
            else:
                my_storage.result = {
                    "field": (frb[field] * frb["weight_field"]),
                    "weight_field": frb["weight_field"]
                }

            del output["object"]

        # Combine results from each slice.
        all_slices = list(all_storage.keys())
        all_slices.sort()
        for my_slice in all_slices:
            if save_slice_images:
                name = os.path.join(
                    self.output_dir,
                    "%s_%04d_%04d" % (self.output_prefix, my_slice,
                                      len(self.light_cone_solution)))
                if weight_field is None:
                    my_image = all_storage[my_slice]["field"]
                else:
                    my_image = all_storage[my_slice]["field"] / \
                      all_storage[my_slice]["weight_field"]
                only_on_root(write_image,
                             np.log10(my_image),
                             "%s_%s.png" % (name, field),
                             cmap_name=cmap_name)

            projection_stack.append(all_storage[my_slice]["field"])
            if weight_field is not None:
                projection_weight_stack.append(all_storage[my_slice]["weight_field"])

        projection_stack = self.simulation.arr(projection_stack)
        projection_weight_stack = self.simulation.arr(projection_weight_stack)

        # Add up slices to make light cone projection.
        if (weight_field is None):
            light_cone_projection = projection_stack.sum(axis=0)
        else:
            light_cone_projection = \
              projection_stack.sum(axis=0) / \
              projection_weight_stack.sum(axis=0)

        filename = os.path.join(self.output_dir, self.output_prefix)

        # Write image.
        if save_final_image:
            only_on_root(write_image,
                         np.log10(light_cone_projection),
                         "%s_%s.png" % (filename, field),
                         cmap_name=cmap_name)

        # Write stack to hdf5 file.
        if save_stack:
            self._save_light_cone_stack(field,
                                        weight_field,
                                        projection_stack,
                                        projection_weight_stack,
                                        filename=filename,
                                        attrs={
                                            "field_of_view":
                                            str(field_of_view),
                                            "image_resolution":
                                            str(image_resolution)
                                        })
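The pixel count above is just the ratio of the field of view to the pixel
size, cast to an integer. A small sketch with hypothetical angular sizes,
using yt's unit-aware quantities as the method itself does:

from yt.units import YTQuantity

field_of_view = YTQuantity(600.0, "arcmin")    # hypothetical image FOV
image_resolution = YTQuantity(60.0, "arcsec")  # hypothetical pixel size

# Same computation as project_light_cone: a dimensionless ratio -> int.
pixels = int((field_of_view / image_resolution).in_units(""))
# 600 arcmin / 60 arcsec -> 600 pixels on a side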
Example #28
def test_fits_image():
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    fields = ("density", "temperature")
    units = (
        'g/cm**3',
        'K',
    )
    ds = fake_random_ds(64,
                        fields=fields,
                        units=units,
                        nprocs=16,
                        length_unit=100.0)

    prj = ds.proj("density", 2)
    prj_frb = prj.to_frb((0.5, "unitary"), 128)

    fid1 = prj_frb.to_fits_data(fields=[("gas", "density"),
                                        ("gas", "temperature")],
                                length_unit="cm")
    fits_prj = FITSProjection(ds,
                              "z", [ds.fields.gas.density, "temperature"],
                              image_res=128,
                              width=(0.5, "unitary"))

    assert_equal(fid1["density"].data, fits_prj["density"].data)
    assert_equal(fid1["temperature"].data, fits_prj["temperature"].data)

    fid1.writeto("fid1.fits", overwrite=True)
    new_fid1 = FITSImageData.from_file("fid1.fits")

    assert_equal(fid1["density"].data, new_fid1["density"].data)
    assert_equal(fid1["temperature"].data, new_fid1["temperature"].data)
    assert_equal(fid1.length_unit, new_fid1.length_unit)
    assert_equal(fid1.time_unit, new_fid1.time_unit)
    assert_equal(fid1.mass_unit, new_fid1.mass_unit)
    assert_equal(fid1.velocity_unit, new_fid1.velocity_unit)
    assert_equal(fid1.magnetic_unit, new_fid1.magnetic_unit)
    assert_equal(fid1.current_time, new_fid1.current_time)

    ds2 = load("fid1.fits")
    ds2.index

    assert ("fits", "density") in ds2.field_list
    assert ("fits", "temperature") in ds2.field_list

    dw_cm = ds2.domain_width.in_units("cm")

    assert dw_cm[0].v == 50.
    assert dw_cm[1].v == 50.

    slc = ds.slice(2, 0.5)
    slc_frb = slc.to_frb((0.5, "unitary"), 128)

    fid2 = slc_frb.to_fits_data(fields=[("gas", "density"),
                                        ("gas", "temperature")],
                                length_unit="cm")
    fits_slc = FITSSlice(ds,
                         "z", [("gas", "density"), ("gas", "temperature")],
                         image_res=128,
                         width=(0.5, "unitary"))

    assert_equal(fid2["density"].data, fits_slc["density"].data)
    assert_equal(fid2["temperature"].data, fits_slc["temperature"].data)

    dens_img = fid2.pop("density")
    temp_img = fid2.pop("temperature")

    combined_fid = FITSImageData.from_images([dens_img, temp_img])
    assert_equal(combined_fid.length_unit, dens_img.length_unit)
    assert_equal(combined_fid.time_unit, dens_img.time_unit)
    assert_equal(combined_fid.mass_unit, dens_img.mass_unit)
    assert_equal(combined_fid.velocity_unit, dens_img.velocity_unit)
    assert_equal(combined_fid.magnetic_unit, dens_img.magnetic_unit)
    assert_equal(combined_fid.current_time, dens_img.current_time)

    cut = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])
    cut_frb = cut.to_frb((0.5, "unitary"), 128)

    fid3 = cut_frb.to_fits_data(fields=[("gas", "density"),
                                        ds.fields.gas.temperature],
                                length_unit="cm")
    fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9],
                                ["density", "temperature"],
                                image_res=128,
                                center=[0.5, 0.42, 0.6],
                                width=(0.5, "unitary"))

    assert_equal(fid3["density"].data, fits_cut["density"].data)
    assert_equal(fid3["temperature"].data, fits_cut["temperature"].data)

    fid3.create_sky_wcs([30., 45.], (1.0, "arcsec/kpc"))
    fid3.writeto("fid3.fits", overwrite=True)
    new_fid3 = FITSImageData.from_file("fid3.fits")
    assert_same_wcs(fid3.wcs, new_fid3.wcs)
    assert new_fid3.wcs.wcs.cunit[0] == "deg"
    assert new_fid3.wcs.wcs.cunit[1] == "deg"
    assert new_fid3.wcs.wcs.ctype[0] == "RA---TAN"
    assert new_fid3.wcs.wcs.ctype[1] == "DEC--TAN"

    buf = off_axis_projection(ds, ds.domain_center, [0.1, 0.2, -0.9], 0.5, 128,
                              "density").swapaxes(0, 1)
    fid4 = FITSImageData(buf, fields="density", width=100.0)
    fits_oap = FITSOffAxisProjection(ds, [0.1, 0.2, -0.9],
                                     "density",
                                     width=(0.5, "unitary"),
                                     image_res=128,
                                     depth=(0.5, "unitary"))

    assert_equal(fid4["density"].data, fits_oap["density"].data)

    fid4.create_sky_wcs([30., 45.], (1.0, "arcsec/kpc"), replace_old_wcs=False)
    assert fid4.wcs.wcs.cunit[0] == "cm"
    assert fid4.wcs.wcs.cunit[1] == "cm"
    assert fid4.wcs.wcs.ctype[0] == "linear"
    assert fid4.wcs.wcs.ctype[1] == "linear"
    assert fid4.wcsa.wcs.cunit[0] == "deg"
    assert fid4.wcsa.wcs.cunit[1] == "deg"
    assert fid4.wcsa.wcs.ctype[0] == "RA---TAN"
    assert fid4.wcsa.wcs.ctype[1] == "DEC--TAN"

    cvg = ds.covering_grid(ds.index.max_level, [0.25, 0.25, 0.25],
                           [32, 32, 32],
                           fields=["density", "temperature"])
    fid5 = cvg.to_fits_data(fields=["density", "temperature"])
    assert fid5.dimensionality == 3

    fid5.update_header("density", "time", 0.1)
    fid5.update_header("all", "units", "cgs")

    assert fid5["density"].header["time"] == 0.1
    assert fid5["temperature"].header["units"] == "cgs"
    assert fid5["density"].header["units"] == "cgs"

    fid6 = FITSImageData.from_images(fid5)

    fid5.change_image_name("density", "mass_per_volume")
    assert fid5["mass_per_volume"].name == "mass_per_volume"
    assert fid5["mass_per_volume"].header["BTYPE"] == "mass_per_volume"
    assert "mass_per_volume" in fid5.fields
    assert "mass_per_volume" in fid5.field_units
    assert "density" not in fid5.fields
    assert "density" not in fid5.field_units

    assert "density" in fid6.fields
    assert_equal(fid6["density"].data, fid5["mass_per_volume"].data)

    fid7 = FITSImageData.from_images(fid4)
    fid7.convolve("density", (3.0, "cm"))

    sigma = 3.0 / fid7.wcs.wcs.cdelt[0]
    kernel = _astropy.conv.Gaussian2DKernel(x_stddev=sigma)
    data_conv = _astropy.conv.convolve(fid4["density"].data.d, kernel)
    assert_allclose(data_conv, fid7["density"].data.d)

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
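The convolution check at the end of this test can be reproduced with plain
AstroPy: convert the physical smoothing width to pixels via the WCS pixel
scale (cdelt), build a Gaussian kernel, and convolve. A sketch with
hypothetical data and pixel scale:

import numpy as np
from astropy.convolution import Gaussian2DKernel, convolve

data = np.random.random((128, 128))  # stand-in for fid["density"].data.d
cdelt = 0.39                         # hypothetical pixel scale, cm per pixel

sigma = 3.0 / cdelt                  # a 3 cm smoothing width, in pixels
kernel = Gaussian2DKernel(x_stddev=sigma)
smoothed = convolve(data, kernel)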