Example #1
File: event_list.py Project: ilaudy/pyxsim
    def add_background(self, energy_bins, spectrum,
                       prng=None, absorb_model=None):
        r"""
        Add background events to an :class:`~pyxsim.event_list.EventList`.
        Returns a new :class:`~pyxsim.event_list.EventList`.

        Parameters
        ----------
        energy_bins : :class:`~yt.units.yt_array.YTArray` with units of keV, size M+1
            The edges of the energy bins for the spectra, where M is the number of
            bins
        spectrum : :class:`~yt.units.yt_array.YTArray` with units of photons/s/cm**2, size M
            The spectrum for the background, where M is the number of bins.
        prng : :class:`~numpy.random.RandomState` object or :mod:`numpy.random`, optional
            A pseudo-random number generator. Typically will only be specified
            if you have a reason to generate the same set of random numbers, such as for a
            test. Default is the :mod:`numpy.random` module.
        absorb_model : :class:`~pyxsim.spectral_models.AbsorptionModel` 
            A model for foreground galactic absorption.
        """
        if prng is None:
            prng = np.random

        eobs = self._add_events(energy_bins, spectrum, prng, absorb_model)
        ne = len(eobs)
        x = prng.uniform(low=0.5, high=2.*self.parameters["pix_center"][0]-0.5, size=ne)
        y = prng.uniform(low=0.5, high=2.*self.parameters["pix_center"][1]-0.5, size=ne)

        events = {}
        events["xpix"] = uconcatenate([x, self.events["xpix"]])
        events["ypix"] = uconcatenate([y, self.events["ypix"]])
        events["eobs"] = uconcatenate([eobs, self.events["eobs"]])

        return EventList(events, self.parameters)
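A minimal usage sketch for the method above, with hypothetical names: `events` is assumed to be an existing pyxsim EventList, and the energy grid and flux array carry yt units as the docstring describes.

import numpy as np
from yt.units.yt_array import YTArray

# Hypothetical flat background spectrum over 0.1-10 keV in 100 bins.
energy_bins = YTArray(np.linspace(0.1, 10.0, 101), "keV")
spectrum = YTArray(np.full(100, 1.0e-4), "photons/s/cm**2")

# A fixed RandomState makes the added background reproducible.
new_events = events.add_background(energy_bins, spectrum,
                                   prng=np.random.RandomState(25))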
Example #2
def make_point_sources(area, exp_time, positions, sky_center,
                       spectra, prng=None):
    r"""
    Create a new :class:`~pyxsim.event_list.EventList` which contains
    point sources.

    Parameters
    ----------
    area : float, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The collecting area to determine the number of events. If units are
        not specified, it is assumed to be in cm^2.
    exp_time : float, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The exposure time to determine the number of events. If units are
        not specified, it is assumed to be in seconds.
    positions : array of source positions, shape 2xN
        The positions of the point sources in RA, Dec, where N is the
        number of point sources. Coordinates should be in degrees.
    sky_center : array-like
        Center RA, Dec of the events in degrees.
    spectra : list (size N) of :class:`~soxs.spectra.Spectrum` objects
        The spectra for the point sources, where N is the number 
        of point sources. Assumed to be in the observer frame.
    prng : integer or :class:`~numpy.random.RandomState` object 
        A pseudo-random number generator. Typically will only be specified
        if you have a reason to generate the same set of random numbers, such as for a
        test. Default is to use the :mod:`numpy.random` module.
    """
    prng = parse_prng(prng)

    spectra = ensure_list(spectra)
    positions = ensure_list(positions)

    area = parse_value(area, "cm**2")
    exp_time = parse_value(exp_time, "s")

    t_exp = exp_time.value/comm.size

    x = []
    y = []
    e = []

    for pos, spectrum in zip(positions, spectra):
        eobs = spectrum.generate_energies(t_exp, area.value, prng=prng)
        ne = eobs.size
        x.append(YTArray([pos[0]] * ne, "deg"))
        y.append(YTArray([pos[1]] * ne, "deg"))
        e.append(YTArray.from_astropy(eobs))

    parameters = {"sky_center": YTArray(sky_center, "degree"),
                  "exp_time": exp_time,
                  "area": area}

    events = {}
    events["xsky"] = uconcatenate(x)
    events["ysky"] = uconcatenate(y)
    events["eobs"] = uconcatenate(e)

    return EventList(events, parameters)
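A hedged sketch of calling the function above for two sources; `spec1` and `spec2` are assumed to be soxs.spectra.Spectrum instances built elsewhere, and the coordinates and values are illustrative.

# Positions are (RA, Dec) pairs in degrees; area and exposure time are given
# as (value, unit) tuples, which parse_value converts as documented.
positions = [(30.01, 45.02), (29.98, 44.97)]
sky_center = (30.0, 45.0)

events = make_point_sources((3000.0, "cm**2"), (100.0, "ks"),
                            positions, sky_center, [spec1, spec2],
                            prng=25)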
Example #3
def make_point_sources(area, exp_time, positions, sky_center,
                       spectra, prng=None):
    r"""
    Create a new :class:`~pyxsim.event_list.EventList` which contains
    point sources.

    Parameters
    ----------
    area : float, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The collecting area to determine the number of events. If units are
        not specified, it is assumed to be in cm^2.
    exp_time : float, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The exposure time to determine the number of events. If units are
        not specified, it is assumed to be in seconds.
    positions : array of source positions, shape 2xN
        The positions of the point sources in RA, Dec, where N is the
        number of point sources. Coordinates should be in degrees.
    sky_center : array-like
        Center RA, Dec of the events in degrees.
    spectra : list (size N) of :class:`~soxs.spectra.Spectrum` objects
        The spectra for the point sources, where N is the number 
        of point sources. Assumed to be in the observer frame.
    prng : integer or :class:`~numpy.random.RandomState` object 
        A pseudo-random number generator. Typically will only be specified
        if you have a reason to generate the same set of random numbers, such as for a
        test. Default is to use the :mod:`numpy.random` module.
    """
    prng = parse_prng(prng)

    spectra = ensure_list(spectra)
    positions = ensure_list(positions)

    area = parse_value(area, "cm**2")
    exp_time = parse_value(exp_time, "s")

    t_exp = exp_time.value/comm.size

    x = []
    y = []
    e = []

    for pos, spectrum in zip(positions, spectra):
        eobs = spectrum.generate_energies(t_exp, area.value, prng=prng)
        ne = eobs.size
        x.append(YTArray([pos[0]] * ne, "degree"))
        y.append(YTArray([pos[1]] * ne, "degree"))
        e.append(YTArray.from_astropy(eobs))

    parameters = {"sky_center": YTArray(sky_center, "degree"),
                  "exp_time": exp_time,
                  "area": area}

    events = {}
    events["eobs"] = uconcatenate(e)
    events["xsky"] = uconcatenate(x)
    events["ysky"] = uconcatenate(y)

    return EventList(events, parameters)
Example #4
    def add_point_sources(self,
                          positions,
                          energy_bins,
                          spectra,
                          prng=None,
                          absorb_model=None):
        r"""
        Add point source events to an :class:`~pyxsim.event_list.EventList`.
        Returns a new :class:`~pyxsim.event_list.EventList`.

        Parameters
        ----------
        positions : array of source positions, shape 2xN
            The positions of the point sources in RA, Dec, where N is the
            number of point sources. Coordinates should be in degrees.
        energy_bins : :class:`~yt.units.yt_array.YTArray` with units of keV, shape M+1
            The edges of the energy bins for the spectra, where M is the number of
            bins
        spectra : list (size N) of :class:`~yt.units.yt_array.YTArray`\s with units of photons/s/cm**2, each with shape M
            The spectra for the point sources, where M is the number of bins and N is
            the number of point sources
        prng : :class:`~numpy.random.RandomState` object or :mod:`numpy.random`, optional
            A pseudo-random number generator. Typically will only be specified
            if you have a reason to generate the same set of random numbers, such as for a
            test. Default is the :mod:`numpy.random` module.
        absorb_model : :class:`~pyxsim.spectral_models.AbsorptionModel`
            A model for foreground galactic absorption.
        """
        if prng is None:
            prng = np.random

        spectra = ensure_list(spectra)
        positions = ensure_list(positions)

        x = [self.events["xpix"]]
        y = [self.events["ypix"]]
        e = [self.events["eobs"]]

        for pos, spectrum in zip(positions, spectra):
            eobs = self._add_events(energy_bins, spectrum, prng, absorb_model)
            xpix, ypix = self.wcs.wcs_world2pix(pos[0], pos[1], 1)
            ne = len(eobs)
            x.append([xpix] * ne)
            y.append([ypix] * ne)
            e.append(eobs)

        events = {}
        events["xpix"] = uconcatenate(x)
        events["ypix"] = uconcatenate(y)
        events["eobs"] = uconcatenate(e)

        return EventList(events, self.parameters)
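A minimal sketch of adding two point sources with identical flat spectra to an existing EventList instance `events` (names and values are illustrative).

import numpy as np
from yt.units.yt_array import YTArray

positions = [(30.01, 45.02), (29.98, 44.97)]             # RA, Dec in degrees
energy_bins = YTArray(np.linspace(0.1, 10.0, 101), "keV")
spectra = [YTArray(np.full(100, 1.0e-5), "photons/s/cm**2")] * 2

new_events = events.add_point_sources(positions, energy_bins, spectra,
                                      prng=np.random.RandomState(25))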
Example #5
    def _parse_parameter_file(self):
        super()._parse_parameter_file()
        self.num_particles.pop(self.default_fluid_type, None)
        self.particle_types_raw = tuple(self.num_particles.keys())
        self.particle_types = self.particle_types_raw

        # correct domain dimensions for the covering grid dimension
        self.base_domain_left_edge = self.domain_left_edge
        self.base_domain_right_edge = self.domain_right_edge
        self.base_domain_dimensions = self.domain_dimensions

        if self.container_type in _grid_data_containers:
            self.domain_left_edge = self.parameters["left_edge"]

            if "level" in self.parameters["con_args"]:
                dx = (self.base_domain_right_edge - self.base_domain_left_edge) / (
                    self.domain_dimensions * self.refine_by ** self.parameters["level"]
                )
                self.domain_right_edge = (
                    self.domain_left_edge + self.parameters["ActiveDimensions"] * dx
                )
                self.domain_dimensions = (
                    (self.domain_right_edge - self.domain_left_edge) / dx
                ).astype(int)
            else:
                self.domain_right_edge = self.parameters["right_edge"]
                self.domain_dimensions = self.parameters["ActiveDimensions"]
                dx = (
                    self.domain_right_edge - self.domain_left_edge
                ) / self.domain_dimensions

            periodicity = (
                np.abs(self.domain_left_edge - self.base_domain_left_edge) < 0.5 * dx
            )
            periodicity &= (
                np.abs(self.domain_right_edge - self.base_domain_right_edge) < 0.5 * dx
            )
            self._periodicity = periodicity

        elif self.data_type == "yt_frb":
            dle = self.domain_left_edge
            self.domain_left_edge = uconcatenate(
                [self.parameters["left_edge"].to(dle.units), [0] * dle.uq]
            )
            dre = self.domain_right_edge
            self.domain_right_edge = uconcatenate(
                [self.parameters["right_edge"].to(dre.units), [1] * dre.uq]
            )
            self.domain_dimensions = np.concatenate(
                [self.parameters["ActiveDimensions"], [1]]
            )
Example #6
def test_ray():
    for nproc in [1, 2, 4, 8]:
        ds = fake_random_ds(64, nprocs=nproc)
        dx = (ds.domain_right_edge - ds.domain_left_edge) / ds.domain_dimensions
        # Three we choose, to get varying vectors, and ten random
        pp1 = np.random.random((3, 13))
        pp2 = np.random.random((3, 13))
        pp1[:, 0] = [0.1, 0.2, 0.3]
        pp2[:, 0] = [0.8, 0.1, 0.4]
        pp1[:, 1] = [0.9, 0.2, 0.3]
        pp2[:, 1] = [0.8, 0.1, 0.4]
        pp1[:, 2] = [0.9, 0.2, 0.9]
        pp2[:, 2] = [0.8, 0.1, 0.4]
        unitary = ds.arr(1.0, "")
        for i in range(pp1.shape[1]):
            p1 = ds.arr(pp1[:, i] + 1e-8 * np.random.random(3), "code_length")
            p2 = ds.arr(pp2[:, i] + 1e-8 * np.random.random(3), "code_length")

            my_ray = ds.ray(p1, p2)
            assert_rel_equal(my_ray["dts"].sum(), unitary, 14)
            ray_cells = my_ray["dts"] > 0

            # find cells intersected by the ray
            my_all = ds.all_data()

            dt = np.abs(dx / (p2 - p1))
            tin = uconcatenate(
                [
                    [(my_all[("index", "x")] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
                    [(my_all[("index", "y")] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
                    [(my_all[("index", "z")] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]],
                ]
            )
            tout = uconcatenate(
                [
                    [(my_all[("index", "x")] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
                    [(my_all[("index", "y")] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
                    [(my_all[("index", "z")] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]],
                ]
            )
            tin = tin.max(axis=0)
            tout = tout.min(axis=0)
            my_cells = (tin < tout) & (tin < 1) & (tout > 0)

            assert_equal(ray_cells.sum(), my_cells.sum())
            assert_rel_equal(
                my_ray[("gas", "density")][ray_cells].sum(),
                my_all[("gas", "density")][my_cells].sum(),
                14,
            )
            assert_rel_equal(my_ray["dts"].sum(), unitary, 14)
Example #7
def test_numpy_wrappers():
    a1 = YTArray([1, 2, 3], 'cm')
    a2 = YTArray([2, 3, 4, 5, 6], 'cm')
    a3 = YTArray([7, 8, 9, 10, 11], 'cm')
    catenate_answer = [1, 2, 3, 2, 3, 4, 5, 6]
    intersect_answer = [2, 3]
    union_answer = [1, 2, 3, 4, 5, 6]
    vstack_answer = [[2, 3, 4, 5, 6], [7, 8, 9, 10, 11]]

    assert_array_equal(YTArray(catenate_answer, 'cm'), uconcatenate((a1, a2)))
    assert_array_equal(catenate_answer, np.concatenate((a1, a2)))

    assert_array_equal(YTArray(intersect_answer, 'cm'), uintersect1d(a1, a2))
    assert_array_equal(intersect_answer, np.intersect1d(a1, a2))

    assert_array_equal(YTArray(union_answer, 'cm'), uunion1d(a1, a2))
    assert_array_equal(union_answer, np.union1d(a1, a2))

    assert_array_equal(YTArray(catenate_answer, 'cm'), uhstack([a1, a2]))
    assert_array_equal(catenate_answer, np.hstack([a1, a2]))

    assert_array_equal(YTArray(vstack_answer, 'cm'), uvstack([a2, a3]))
    assert_array_equal(vstack_answer, np.vstack([a2, a3]))

    assert_array_equal(YTArray(vstack_answer, 'cm'), ustack([a2, a3]))
    assert_array_equal(vstack_answer, np.stack([a2, a3]))
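The tests above exercise the basic contract of the u* wrappers: they return a YTArray with validated units, whereas the plain NumPy calls are only compared against raw values. A minimal sketch of that behavior (the exact exception type raised on a unit mismatch depends on the yt/unyt version):

import numpy as np
from yt.units.yt_array import YTArray, uconcatenate

a = YTArray([1, 2, 3], 'cm')
b = YTArray([4, 5], 'cm')

combined = uconcatenate((a, b))
print(combined, combined.units)        # [1 2 3 4 5] cm -- units preserved

# Mixing incompatible units is rejected instead of silently concatenated:
try:
    uconcatenate((a, YTArray([1, 2], 'g')))
except Exception as err:
    print(type(err).__name__)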
Example #8
 def apply_to_stream(self, overwrite=False, **kwargs):
     """
     Apply the particles to a grid-based stream dataset. If particles
     already exist, and overwrite=False, do not overwrite them, but add
     the new ones to them.
     """
     grid_data = []
     for i, g in enumerate(self.ds.index.grids):
         data = {}
         number_of_particles = self.NumberOfParticles[i]
         if not overwrite:
             number_of_particles += g.NumberOfParticles
         grid_particles = self.get_for_grid(g)
         for field in self.field_list:
             if number_of_particles > 0:
                 if (g.NumberOfParticles > 0 and not overwrite
                         and field in self.ds.field_list):
                     # We have particles in this grid, we're not
                     # overwriting them, and the field is in the field
                     # list already
                     data[field] = uconcatenate(
                         [g[field], grid_particles[field]])
                 else:
                     # Otherwise, simply add the field in
                     data[field] = grid_particles[field]
             else:
                 # We don't have particles in this grid
                 data[field] = np.array([], dtype="float64")
         grid_data.append(data)
     self.ds.index.update_data(grid_data)
Example #9
 def __add__(self, other):
     validate_parameters(self.parameters, other.parameters, skip=["sky_center"])
     events = {}
     for item1, item2 in zip(self.items(), other.items()):
         k1, v1 = item1
         k2, v2 = item2
         events[k1] = uconcatenate([v1,v2])
     return type(self)(events, dict(self.parameters))
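Given the operator above, two event lists built with compatible parameters can simply be summed; a minimal sketch with hypothetical names:

# el_src and el_bkg are assumed to be EventList instances created with the
# same exposure time, area, etc.; validate_parameters enforces this while
# ignoring "sky_center". The result holds the concatenated event arrays.
combined = el_src + el_bkg
print(combined["eobs"].size)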
Example #10
def concatenate_photons(photons):
    for key in photons:
        if len(photons[key]) > 0:
            photons[key] = uconcatenate(photons[key])
        elif key == "NumberOfPhotons":
            photons[key] = np.array([])
        else:
            photons[key] = YTArray([], photon_units[key])
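A minimal sketch of the in-place concatenation above; `photon_units` is a module-level mapping in the original source, so a stand-in is defined here and the field names are illustrative.

import numpy as np
from yt.units.yt_array import YTArray

# Stand-in for the module-level photon_units mapping.
photon_units = {"Energy": "keV", "x": "kpc"}

photons = {
    "Energy": [YTArray([1.2, 3.4], "keV"), YTArray([0.7], "keV")],
    "x": [],                                   # nothing collected for this key
    "NumberOfPhotons": [np.array([2]), np.array([1])],
}
concatenate_photons(photons)
# photons["Energy"] is now a single 3-element YTArray, photons["x"] an empty
# YTArray in kpc, and photons["NumberOfPhotons"] a single ndarray.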
Example #11
 def _AllFields(field, data):
     v = []
     for ptype in data.ds.particle_types:
         data.ds._last_freq = (ptype, None)
         if ptype == "all" or ptype in data.ds.known_filters:
             continue
         v.append(data[ptype, fname][:, axi])
     rv = uconcatenate(v, axis=0)
     return rv
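The function above closes over `fname` and `axi` from an enclosing scope; in yt this pattern builds per-component fields for the "all" particle union. A hedged sketch of how such a closure could be produced and registered (field names and units are illustrative):

from yt.units.yt_array import uconcatenate

def make_all_field(fname, axi):
    # Return a field function that stitches together component `axi` of the
    # per-particle-type field `fname` across all concrete particle types.
    def _all_field(field, data):
        v = []
        for ptype in data.ds.particle_types:
            if ptype == "all" or ptype in data.ds.known_filters:
                continue
            v.append(data[ptype, fname][:, axi])
        return uconcatenate(v, axis=0)
    return _all_field

# Illustrative registration on an already-loaded dataset `ds`:
# ds.add_field(("all", "particle_position_x"),
#              function=make_all_field("particle_position", 0),
#              sampling_type="particle", units="code_length")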
Example #12
File: event_list.py Project: ilaudy/pyxsim
 def __add__(self, other):
     assert_same_wcs(self.wcs, other.wcs)
     validate_parameters(self.parameters, other.parameters)
     events = {}
     for item1, item2 in zip(self.items(), other.items()):
         k1, v1 = item1
         k2, v2 = item2
         events[k1] = uconcatenate([v1,v2])
     return EventList(events, self.parameters)
Example #13
def test_ray():
    for nproc in [1, 2, 4, 8]:
        ds = fake_random_ds(64, nprocs=nproc)
        dx = (ds.domain_right_edge - ds.domain_left_edge) / \
          ds.domain_dimensions
        # Three we choose, to get varying vectors, and ten random
        pp1 = np.random.random((3, 13))
        pp2 = np.random.random((3, 13))
        pp1[:, 0] = [0.1, 0.2, 0.3]
        pp2[:, 0] = [0.8, 0.1, 0.4]
        pp1[:, 1] = [0.9, 0.2, 0.3]
        pp2[:, 1] = [0.8, 0.1, 0.4]
        pp1[:, 2] = [0.9, 0.2, 0.9]
        pp2[:, 2] = [0.8, 0.1, 0.4]
        unitary = ds.arr(1.0, '')
        for i in range(pp1.shape[1]):
            p1 = ds.arr(pp1[:, i] + 1e-8 * np.random.random(3), 'code_length')
            p2 = ds.arr(pp2[:, i] + 1e-8 * np.random.random(3), 'code_length')

            my_ray = ds.ray(p1, p2)
            yield assert_rel_equal, my_ray['dts'].sum(), unitary, 14
            ray_cells = my_ray['dts'] > 0

            # find cells intersected by the ray
            my_all = ds.all_data()

            dt = np.abs(dx / (p2 - p1))
            tin = uconcatenate([[
                (my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]
            ], [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]
                ], [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
            tout = uconcatenate([[
                (my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]
            ], [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]
                ], [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
            tin = tin.max(axis=0)
            tout = tout.min(axis=0)
            my_cells = (tin < tout) & (tin < 1) & (tout > 0)

            yield assert_equal, ray_cells.sum(), my_cells.sum()
            yield assert_rel_equal, my_ray['density'][ray_cells].sum(), \
                                    my_all['density'][my_cells].sum(), 14
            yield assert_rel_equal, my_ray['dts'].sum(), unitary, 14
Example #14
 def _AllFields(field, data):
     v = []
     for ptype in data.ds.particle_types:
         data.ds._last_freq = (ptype, None)
         if ptype == "all" or \
             ptype in data.ds.known_filters:
               continue
         v.append(data[ptype, fname][:,axi])
     rv = uconcatenate(v, axis=0)
     return rv
Example #15
 def __add__(self, other):
     validate_parameters(self.parameters,
                         other.parameters,
                         skip=["sky_center"])
     events = {}
     for item1, item2 in zip(self.items(), other.items()):
         k1, v1 = item1
         k2, v2 = item2
         events[k1] = uconcatenate([v1, v2])
     return type(self)(events, dict(self.parameters))
Example #16
File: io.py Project: jisuoqing/yt
 def _generate_smoothing_length(self, index):
     data_files = index.data_files
     if not self.ds.gen_hsmls:
         return
     hsml_fn = data_files[0].filename.replace(".hdf5", ".hsml.hdf5")
     if os.path.exists(hsml_fn):
         with h5py.File(hsml_fn, mode="r") as f:
             file_hash = f.attrs["q"]
         if file_hash != self.ds._file_hash:
             mylog.warning("Replacing hsml files.")
             for data_file in data_files:
                 hfn = data_file.filename.replace(".hdf5", ".hsml.hdf5")
                 os.remove(hfn)
         else:
             return
     positions = []
     counts = defaultdict(int)
     for data_file in data_files:
         for _, ppos in self._yield_coordinates(
             data_file, needed_ptype=self.ds._sph_ptypes[0]
         ):
             counts[data_file.filename] += ppos.shape[0]
             positions.append(ppos)
     if not positions:
         return
     offsets = {}
     offset = 0
     for fn, count in counts.items():
         offsets[fn] = offset
         offset += count
     kdtree = index.kdtree
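     # Reorder all positions into kdtree traversal order, compute smoothing
     # lengths there, then scatter them back to the original particle order
     # with argsort(kdtree.idx) before writing the per-file slices below.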
     positions = uconcatenate(positions)[kdtree.idx]
     hsml = generate_smoothing_length(
         positions.astype("float64"), kdtree, self.ds._num_neighbors
     )
     dtype = positions.dtype
     hsml = hsml[np.argsort(kdtree.idx)].astype(dtype)
     mylog.warning("Writing smoothing lengths to hsml files.")
     for i, data_file in enumerate(data_files):
         si, ei = data_file.start, data_file.end
         fn = data_file.filename
         hsml_fn = data_file.filename.replace(".hdf5", ".hsml.hdf5")
         with h5py.File(hsml_fn, mode="a") as f:
             if i == 0:
                 f.attrs["q"] = self.ds._file_hash
             g = f.require_group(self.ds._sph_ptypes[0])
             d = g.require_dataset(
                 "SmoothingLength", dtype=dtype, shape=(counts[fn],)
             )
             begin = si + offsets[fn]
             end = min(ei, d.size) + offsets[fn]
             d[si:ei] = hsml[begin:end]
Example #17
File: test_chunking.py Project: tukss/yt
def test_chunking():
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(64, nprocs=nprocs)
        c = (ds.domain_right_edge + ds.domain_left_edge) / 2.0
        c += ds.arr(0.5 / ds.domain_dimensions, "code_length")
        for dobj in _get_dobjs(c):
            obj = getattr(ds, dobj[0])(*dobj[1])
            coords = {"f": {}, "i": {}}
            for t in ["io", "all", "spatial"]:
                coords["i"][t] = []
                coords["f"][t] = []
                for chunk in obj.chunks(None, t):
                    coords["f"][t].append(chunk.fcoords[:, :])
                    coords["i"][t].append(chunk.icoords[:, :])
                coords["f"][t] = uconcatenate(coords["f"][t])
                coords["i"][t] = uconcatenate(coords["i"][t])
                coords["f"][t].sort()
                coords["i"][t].sort()
            assert_equal(coords["f"]["io"], coords["f"]["all"])
            assert_equal(coords["f"]["io"], coords["f"]["spatial"])
            assert_equal(coords["i"]["io"], coords["i"]["all"])
            assert_equal(coords["i"]["io"], coords["i"]["spatial"])
Example #18
def test_numpy_wrappers():
    a1 = YTArray([1, 2, 3], 'cm')
    a2 = YTArray([2, 3, 4, 5, 6], 'cm')
    catenate_answer = [1, 2, 3, 2, 3, 4, 5, 6]
    intersect_answer = [2, 3]
    union_answer = [1, 2, 3, 4, 5, 6]

    assert_array_equal(YTArray(catenate_answer, 'cm'), uconcatenate((a1, a2)))
    assert_array_equal(catenate_answer, np.concatenate((a1, a2)))

    assert_array_equal(YTArray(intersect_answer, 'cm'), uintersect1d(a1, a2))
    assert_array_equal(intersect_answer, np.intersect1d(a1, a2))

    assert_array_equal(YTArray(union_answer, 'cm'), uunion1d(a1, a2))
    assert_array_equal(union_answer, np.union1d(a1, a2))
Example #19
 def _accumulate_values(self, method):
     # We call this generically.  It's somewhat slower, since we're doing
     # costly getattr functions, but this allows us to generalize.
     mname = f"select_{method}"
     arrs = []
     for obj in self._fast_index or self.objs:
         f = getattr(obj, mname)
         arrs.append(f(self.dobj))
     if method == "dtcoords":
         arrs = [arr[0] for arr in arrs]
     elif method == "tcoords":
         arrs = [arr[1] for arr in arrs]
     arrs = uconcatenate(arrs)
     self.data_size = arrs.shape[0]
     return arrs
Example #20
 def _accumulate_values(self, method):
     # We call this generically.  It's somewhat slower, since we're doing
     # costly getattr functions, but this allows us to generalize.
     mname = "select_%s" % method
     arrs = []
     for obj in self._fast_index or self.objs:
         f = getattr(obj, mname)
         arrs.append(f(self.dobj))
     if method == "dtcoords":
         arrs = [arr[0] for arr in arrs]
     elif method == "tcoords":
         arrs = [arr[1] for arr in arrs]
     arrs = uconcatenate(arrs)
     self.data_size = arrs.shape[0]
     return arrs
Example #21
 def __add__(self, other):
     validate_parameters(self.parameters, other.parameters)
     for param in ["hubble_constant", "omega_matter", "omega_lambda",
                   "omega_curvature"]:
         v1 = getattr(self.cosmo, param)
         v2 = getattr(other.cosmo, param)
         check_equal = np.allclose(np.array(v1), np.array(v2), rtol=0.0, atol=1.0e-10)
         if not check_equal:
             raise RuntimeError("The values for the parameter '%s' in the two" % param +
                                " cosmologies are not identical (%s vs. %s)!" % (v1, v2))
     photons = {}
     for item1, item2 in zip(self.photons.items(), other.photons.items()):
         k1, v1 = item1
         k2, v2 = item2
         photons[k1] = uconcatenate([v1,v2])
     return PhotonList(photons, self.parameters, self.cosmo)
Example #22
def test_numpy_wrappers():
    a1 = YTArray([1, 2, 3], 'cm')
    a2 = YTArray([2, 3, 4, 5, 6], 'cm')
    catenate_answer = [1, 2, 3, 2, 3, 4, 5, 6]
    intersect_answer = [2, 3]
    union_answer = [1, 2, 3, 4, 5, 6]

    yield (assert_array_equal, YTArray(catenate_answer, 'cm'),
           uconcatenate((a1, a2)))
    yield assert_array_equal, catenate_answer, np.concatenate((a1, a2))

    yield (assert_array_equal, YTArray(intersect_answer, 'cm'),
           uintersect1d(a1, a2))
    yield assert_array_equal, intersect_answer, np.intersect1d(a1, a2)

    yield assert_array_equal, YTArray(union_answer, 'cm'), uunion1d(a1, a2)
    yield assert_array_equal, union_answer, np.union1d(a1, a2)
Example #23
def test_line_emission():

    bms = BetaModelSource()
    ds = bms.ds

    def _dm_emission(field, data):
        return cross_section * (data["dark_matter_density"] /
                                m_chi)**2 * data["cell_volume"]

    ds.add_field(("gas", "dm_emission"), function=_dm_emission, units="s**-1")

    location = YTQuantity(3.5, "keV")
    sigma = YTQuantity(1000., "km/s")
    sigma_E = (location * sigma / clight).in_units("keV")

    A = YTQuantity(1000., "cm**2")
    exp_time = YTQuantity(2.0e5, "s")
    redshift = 0.01

    sphere = ds.sphere("c", (100., "kpc"))

    line_model = LineSourceModel(location,
                                 "dm_emission",
                                 sigma="dark_matter_dispersion",
                                 prng=32)

    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          line_model)

    D_A = photons.parameters["fid_d_a"]
    dist_fac = 1.0 / (4. * np.pi * D_A * D_A * (1. + redshift)**3)
    dm_E = (sphere["dm_emission"]).sum()

    E = uconcatenate(photons["energy"])
    n_E = len(E)

    n_E_pred = (exp_time * A * dm_E * dist_fac).in_units("dimensionless")

    loc = location / (1. + redshift)
    sig = sigma_E / (1. + redshift)

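    # 1.645 is the 95th percentile of a standard normal, so each |difference|
    # checked below must fall inside a ~90% two-sided confidence interval.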
    assert np.abs(loc - E.mean()) < 1.645 * sig / np.sqrt(n_E)
    assert np.abs(E.std()**2 -
                  sig * sig) < 1.645 * np.sqrt(2 * (n_E - 1)) * sig**2 / n_E
    assert np.abs(n_E - n_E_pred) < 1.645 * np.sqrt(n_E)
Example #24
def test_line_emission():

    bms = BetaModelSource()
    ds = bms.ds

    def _dm_emission(field, data):
        return cross_section*(data["dark_matter_density"]/m_chi)**2*data["cell_volume"]
    ds.add_field(("gas","dm_emission"), function=_dm_emission, units="s**-1")

    location = YTQuantity(3.5, "keV")
    sigma = YTQuantity(1000., "km/s")
    sigma_E = (location*sigma/clight).in_units("keV")

    A = YTQuantity(1000., "cm**2")
    exp_time = YTQuantity(2.0e5, "s")
    redshift = 0.01

    sphere = ds.sphere("c", (100.,"kpc"))

    line_model = LineSourceModel(location, "dm_emission", 
                                 sigma="dark_matter_dispersion", prng=32)

    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          line_model)

    D_A = photons.parameters["fid_d_a"]
    dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3)
    dm_E = (sphere["dm_emission"]).sum()

    E = uconcatenate(photons["energy"])
    n_E = len(E)

    n_E_pred = (exp_time*A*dm_E*dist_fac).in_units("dimensionless")

    loc = location/(1.+redshift)
    sig = sigma_E/(1.+redshift)

    assert np.abs(loc-E.mean()) < 1.645*sig/np.sqrt(n_E)
    assert np.abs(E.std()**2-sig*sig) < 1.645*np.sqrt(2*(n_E-1))*sig**2/n_E
    assert np.abs(n_E-n_E_pred) < 1.645*np.sqrt(n_E)
Example #25
 def apply_to_stream(self, overwrite=False, **kwargs):
     """
     Apply the particles to a grid-based stream dataset. If particles
     already exist, and overwrite=False, do not overwrite them, but add
     the new ones to them.
     """
     if "clobber" in kwargs:
         issue_deprecation_warning(
             'The "clobber" keyword argument '
             'is deprecated. Use the "overwrite" '
             "argument, which has the same effect, "
             "instead."
         )
         overwrite = kwargs.pop("clobber")
     grid_data = []
     for i, g in enumerate(self.ds.index.grids):
         data = {}
         number_of_particles = self.NumberOfParticles[i]
         if not overwrite:
             number_of_particles += g.NumberOfParticles
         grid_particles = self.get_for_grid(g)
         for field in self.field_list:
             if number_of_particles > 0:
                 if (
                     g.NumberOfParticles > 0
                     and not overwrite
                     and field in self.ds.field_list
                 ):
                     # We have particles in this grid, we're not
                     # overwriting them, and the field is in the field
                     # list already
                     data[field] = uconcatenate([g[field], grid_particles[field]])
                 else:
                     # Otherwise, simply add the field in
                     data[field] = grid_particles[field]
             else:
                 # We don't have particles in this grid
                 data[field] = np.array([], dtype="float64")
         grid_data.append(data)
     self.ds.index.update_data(grid_data)
Example #26
 def __add__(self, other):
     validate_parameters(self.parameters, other.parameters)
     for param in [
             "hubble_constant", "omega_matter", "omega_lambda",
             "omega_curvature"
     ]:
         v1 = getattr(self.cosmo, param)
         v2 = getattr(other.cosmo, param)
         check_equal = np.allclose(np.array(v1),
                                   np.array(v2),
                                   rtol=0.0,
                                   atol=1.0e-10)
         if not check_equal:
             raise RuntimeError(
                 "The values for the parameter '%s' in the two" % param +
                 " cosmologies are not identical (%s vs. %s)!" % (v1, v2))
     photons = {}
     for item1, item2 in zip(self.photons.items(), other.photons.items()):
         k1, v1 = item1
         k2, v2 = item2
         photons[k1] = uconcatenate([v1, v2])
     return PhotonList(photons, self.parameters, self.cosmo)
Example #27
def all_data(data, ptype, fields, kdtree=False):
    field_data = {}
    fields = set(fields)
    for field in fields:
        field_data[field] = []

    for chunk in data.all_data().chunks([], "io"):
        for field in fields:
            field_data[field].append(chunk[ptype, field].in_base("code"))

    for field in fields:
        field_data[field] = uconcatenate(field_data[field])

    if kdtree is True:
        kdtree = data.index.kdtree
        for field in fields:
            if len(field_data[field].shape) == 1:
                field_data[field] = field_data[field][kdtree.idx]
            else:
                field_data[field] = field_data[field][kdtree.idx, :]

    return field_data
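A hedged usage sketch of the helper above; the dataset path, particle type, and field names are illustrative and depend on the frontend, and kdtree=True assumes an SPH dataset whose index exposes a kdtree.

import yt

ds = yt.load("snapshot_033.hdf5")            # illustrative SPH dataset
fields = ["particle_position", "particle_mass"]

# Gather the selected fields for one particle type across all IO chunks,
# reordered to match the dataset's kdtree index.
fd = all_data(ds, "PartType0", fields, kdtree=True)
print(fd["particle_mass"].shape, fd["particle_position"].shape)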
Example #28
 def apply_to_stream(self, clobber=False):
     """
     Apply the particles to a stream dataset. If particles already exist,
     and clobber=False, do not overwrite them, but add the new ones to them. 
     """
     grid_data = []
     for i, g in enumerate(self.ds.index.grids):
         data = {}
         if clobber:
             data["number_of_particles"] = self.NumberOfParticles[i]
         else:
             data["number_of_particles"] = self.NumberOfParticles[i] + \
                                           g.NumberOfParticles
         grid_particles = self.get_for_grid(g)
         for field in self.field_list:
             if data["number_of_particles"] > 0:
                 # We have particles in this grid
                 if g.NumberOfParticles > 0 and not clobber:
                     # Particles already exist
                     if field in self.ds.field_list:
                         # This field already exists
                         prev_particles = g[field]
                     else:
                         # This one doesn't, set the previous particles' field
                         # values to zero
                         prev_particles = np.zeros((g.NumberOfParticles))
                         prev_particles = self.ds.arr(
                             prev_particles,
                             input_units=self.field_units[field])
                     data[field] = uconcatenate(
                         (prev_particles, grid_particles[field]))
                 else:
                     # Particles do not already exist or we're clobbering
                     data[field] = grid_particles[field]
             else:
                 # We don't have particles in this grid
                 data[field] = np.array([], dtype='float64')
         grid_data.append(data)
     self.ds.index.update_data(grid_data)
Example #29
 def apply_to_stream(self, clobber=False):
     """
     Apply the particles to a stream dataset. If particles already exist,
     and clobber=False, do not overwrite them, but add the new ones to them. 
     """
     grid_data = []
     for i,g in enumerate(self.ds.index.grids):
         data = {}
         if clobber :
             data["number_of_particles"] = self.NumberOfParticles[i]
         else :
             data["number_of_particles"] = self.NumberOfParticles[i] + \
                                           g.NumberOfParticles
         grid_particles = self.get_for_grid(g)
         for field in self.field_list :
             if data["number_of_particles"] > 0:
                 # We have particles in this grid
                 if g.NumberOfParticles > 0 and not clobber:
                     # Particles already exist
                     if field in self.ds.field_list:
                         # This field already exists
                         prev_particles = g[field]
                     else:
                         # This one doesn't, set the previous particles' field
                         # values to zero
                         prev_particles = np.zeros((g.NumberOfParticles))
                         prev_particles = self.ds.arr(prev_particles,
                             input_units = self.field_units[field])
                     data[field] = uconcatenate((prev_particles,
                                                 grid_particles[field]))
                 else:
                     # Particles do not already exist or we're clobbering
                     data[field] = grid_particles[field]
             else:
                 # We don't have particles in this grid
                 data[field] = np.array([], dtype='float64')
         grid_data.append(data)
     self.ds.index.update_data(grid_data)
Example #30
def test_particle_generator():
    # First generate our dataset
    domain_dims = (128, 128, 128)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4.*np.ones(domain_dims)
    fields = {"density": (dens, 'code_mass/code_length**3'),
              "temperature": (temp, 'K')}
    ug = load_uniform_grid(fields, domain_dims, 1.0)
    fo = [ic.BetaModelSphere(1.0,0.1,0.5,[0.5,0.5,0.5],{"density":(10.0)})]
    rc = [fm.flagging_method_registry["overdensity"](4.0)]
    ds = refine_amr(ug, rc, fo, 3)

    # Now generate particles from density

    field_list = [("io", "particle_position_x"),
                  ("io", "particle_position_y"),
                  ("io", "particle_position_z"),
                  ("io", "particle_index"),
                  ("io", "particle_gas_density")]
    num_particles = 1000000
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles, field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)
    
    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
    particles_per_grid1 = [len(grid["particle_position_x"]) for grid in ds.index.grids]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles

    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert(np.unique(tags).size == num_particles)
    # Set up a lattice of particles
    pdims = np.array([64,64,64])
    def new_indices() :
        # We just add new indices onto the existing ones
        return np.arange((np.product(pdims)))+num_particles
    le = np.array([0.25,0.25,0.25])
    re = np.array([0.75,0.75,0.75])
    new_field_list = field_list + [("io", "particle_gas_temperature")]
    new_field_dict = {("gas", "density"): ("io", "particle_gas_density"),
                      ("gas", "temperature"): ("io", "particle_gas_temperature")}

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, new_field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(new_field_dict)

    #Test lattice positions
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0],re[0],num=pdims[0],endpoint=True)
    ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
    zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)

    assert_almost_equal( xpos, xpred)
    assert_almost_equal( ypos, ypred)
    assert_almost_equal( zpos, zpred)

    #Test the number of particles again
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles

    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [len(grid["particle_position_x"]) for grid in ds.index.grids]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles

    #Test the uniqueness of tags
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    yield assert_equal, tags, np.arange((np.product(pdims)+num_particles))

    # Test that the old particles have zero for the new field
    old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
                          for i, grid in enumerate(ds.index.grids)]
    test_zeros = [np.zeros((particles_per_grid1[i])) 
                  for i, grid in enumerate(ds.index.grids)]
    yield assert_equal, old_particle_temps, test_zeros

    #Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in new_field_list :
        pdata[field] = dd[field]

    #Test the "from-list" generator and particle field clobber
    particles3 = FromListParticleGenerator(ds, num_particles+np.product(pdims), pdata)
    particles3.apply_to_stream(clobber=True)
    
    #Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles
    particles_per_grid2 = [len(grid["particle_position_z"]) for grid in ds.index.grids]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles
Example #31
    def generate_events(self,
                        area,
                        exp_time,
                        angular_width,
                        source_model,
                        sky_center,
                        parameters=None,
                        velocity_fields=None,
                        absorb_model=None,
                        nH=None,
                        no_shifting=False,
                        sigma_pos=None,
                        prng=None):
        """
        Generate projected events from a light cone simulation. 

        Parameters
        ----------
        area : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The collecting area to determine the number of events. If units are
            not specified, it is assumed to be in cm^2.
        exp_time : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The exposure time to determine the number of events. If units are
            not specified, it is assumed to be in seconds.
        angular_width : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The angular width of the light cone simulation. If units are not
            specified, it is assumed to be in degrees.
        source_model : :class:`~pyxsim.source_models.SourceModel`
            A source model used to generate the events.
        sky_center : array-like
            Center RA, Dec of the events in degrees.
        parameters : dict, optional
            A dictionary of parameters to be passed for the source model to use,
            if necessary.
        velocity_fields : list of fields
            The yt fields to use for the velocity. If not specified, the following will
            be assumed:
            ['velocity_x', 'velocity_y', 'velocity_z'] for grid datasets
            ['particle_velocity_x', 'particle_velocity_y', 'particle_velocity_z'] for particle datasets
        absorb_model : string or :class:`~pyxsim.spectral_models.AbsorptionModel` 
            A model for foreground galactic absorption, to simulate the absorption
            of events before being detected. This cannot be applied here if you 
            already did this step previously in the creation of the 
            :class:`~pyxsim.photon_list.PhotonList` instance. Known options for 
            strings are "wabs" and "tbabs".
        nH : float, optional
            The foreground column density in units of 10^22 cm^{-2}. Only used if
            absorption is applied.
        no_shifting : boolean, optional
            If set, the photon energies will not be Doppler shifted.
        sigma_pos : float, optional
            Apply a gaussian smoothing operation to the sky positions of the
            events. This may be useful when the binned events appear blocky due
            to their uniform distribution within simulation cells. However, this
            will move the events away from their originating position on the
            sky, and so may distort surface brightness profiles and/or spectra.
            Should probably only be used for visualization purposes. Supply a
            float here to smooth with a standard deviation with this fraction
            of the cell size. Default: None
        prng : integer or :class:`~numpy.random.RandomState` object
            A pseudo-random number generator. Typically will only be specified
            if you have a reason to generate the same set of random numbers, such as for a
            test. Default is to use the :mod:`numpy.random` module.
        """
        prng = parse_prng(prng)

        area = parse_value(area, "cm**2")
        exp_time = parse_value(exp_time, "s")
        aw = parse_value(angular_width, "deg")

        tot_events = defaultdict(list)

        for output in self.light_cone_solution:
            ds = load(output["filename"])
            ax = output["projection_axis"]
            c = output[
                "projection_center"] * ds.domain_width + ds.domain_left_edge
            le = c.copy()
            re = c.copy()
            width = ds.quan(aw * output["box_width_per_angle"],
                            "unitary").to("code_length")
            depth = ds.domain_width[ax].in_units(
                "code_length") * output["box_depth_fraction"]
            le[ax] -= 0.5 * depth
            re[ax] += 0.5 * depth
            for off_ax in axes_lookup[ax]:
                le[off_ax] -= 0.5 * width
                re[off_ax] += 0.5 * width
            reg = ds.box(le, re)
            photons = PhotonList.from_data_source(
                reg,
                output['redshift'],
                area,
                exp_time,
                source_model,
                parameters=parameters,
                center=c,
                velocity_fields=velocity_fields,
                cosmology=ds.cosmology)
            if sum(photons["num_photons"]) > 0:
                events = photons.project_photons("xyz"[ax],
                                                 sky_center,
                                                 absorb_model=absorb_model,
                                                 nH=nH,
                                                 no_shifting=no_shifting,
                                                 sigma_pos=sigma_pos,
                                                 prng=prng)
                if events.num_events > 0:
                    tot_events["xsky"].append(events["xsky"])
                    tot_events["ysky"].append(events["ysky"])
                    tot_events["eobs"].append(events["eobs"])
                del events

            del photons

        parameters = {
            "exp_time": exp_time,
            "area": area,
            "sky_center": YTArray(sky_center, "deg")
        }

        for key in tot_events:
            tot_events[key] = uconcatenate(tot_events[key])

        return EventList(tot_events, parameters)
Example #32
def test_xray_binaries():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    prng = 25

    ds = data_dir_load(galaxy)

    def _age(field, data):
        z_s = 1.0 / data["PartType4", "StellarFormationTime"] - 1.0
        age = data.ds.cosmology.t_from_z(0.0) - data.ds.cosmology.t_from_z(z_s)
        age.convert_to_units("Gyr")
        return age

    ds.add_field(("PartType4", "particle_age"), function=_age, units="Gyr", 
                 particle_type=True)

    sp = ds.sphere("max", (0.25, "Mpc"))

    scale_length = (1.0, "kpc")
    age_field = ("PartType4", "particle_age")

    new_ds = make_xrb_particles(sp, age_field, scale_length, prng=prng)

    dd = new_ds.all_data()

    area = ds.quan(25000.0, "cm**2")
    exp_time = ds.quan(300.0, "ks")
    emin = 0.1
    emax = 10.0
    redshift = 0.01

    photons_xrb = make_xrb_photons(new_ds, redshift, area, 
                                   exp_time, emin, emax,
                                   center=sp.center,
                                   cosmology=ds.cosmology, prng=prng)

    D_L = ds.cosmology.luminosity_distance(0.0, redshift)

    kappa = convert_bands(alpha_lmxb, emin_hmxb, emax_hmxb,
                          emin_lmxb, emax_lmxb)

    E = uconcatenate(photons_xrb["energy"])

    lmxb_idxs = dd["particle_spectral_index"] < 2.0
    hmxb_idxs = np.logical_not(lmxb_idxs)

    flux = dd["particle_luminosity"][lmxb_idxs].sum()*kappa/bc_lmxb
    flux += dd["particle_luminosity"][hmxb_idxs].sum()/bc_hmxb
    flux /= 4.0*np.pi*D_L*D_L

    idxs = E*(1.0+redshift) > 2.0
    E_mean = E[idxs].mean().to("erg")
    n1 = flux*exp_time*area/E_mean
    n2 = idxs.sum()
    dn = np.sqrt(n2)

    assert np.abs(n1-n2) < 1.645*dn

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #33
    def __init__(self, ds, data_source, num_particles, field_list,
                 density_field="density"):
        r"""
        Generate particles based on a density field.

        Parameters
        ----------
        ds : `Dataset`
            The dataset which will serve as the base for these particles.
        data_source : `yt.data_objects.data_containers.YTSelectionContainer`
            The data source containing the density field.
        num_particles : int
            The number of particles to be generated
        field_list : list of strings
            A list of particle fields
        density_field : string, optional
            A density field which will serve as the distribution function for the
            particle positions. Theoretically, this could be any 'per-volume' field. 
            
        Examples
        --------
        >>> sphere = ds.sphere(ds.domain_center, 0.5)
        >>> num_p = 100000
        >>> fields = ["particle_position_x","particle_position_y",
        >>>           "particle_position_z",
        >>>           "particle_density","particle_temperature"]
        >>> particles = WithDensityParticleGenerator(ds, sphere, num_particles,
        >>>                                          fields, density_field='Dark_Matter_Density')
        """

        ParticleGenerator.__init__(self, ds, num_particles, field_list)

        num_cells = len(data_source["x"].flat)
        max_mass = (data_source[density_field]*
                    data_source["cell_volume"]).max()
        num_particles_left = num_particles
        all_x = []
        all_y = []
        all_z = []
        
        pbar = get_pbar("Generating Particles", num_particles)
        tot_num_accepted = int(0)
        
        while num_particles_left > 0:

            m = np.random.uniform(high=1.01*max_mass,
                                  size=num_particles_left)
            idxs = np.random.random_integers(low=0, high=num_cells-1,
                                             size=num_particles_left)
            m_true = (data_source[density_field]*
                      data_source["cell_volume"]).flat[idxs]
            accept = m <= m_true
            num_accepted = accept.sum()
            accepted_idxs = idxs[accept]
            
            xpos = data_source["x"].flat[accepted_idxs] + \
                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
                   data_source["dx"].flat[accepted_idxs]
            ypos = data_source["y"].flat[accepted_idxs] + \
                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
                   data_source["dy"].flat[accepted_idxs]
            zpos = data_source["z"].flat[accepted_idxs] + \
                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
                   data_source["dz"].flat[accepted_idxs]

            all_x.append(xpos)
            all_y.append(ypos)
            all_z.append(zpos)

            num_particles_left -= num_accepted
            tot_num_accepted += num_accepted
            pbar.update(tot_num_accepted)

        pbar.finish()

        x = uconcatenate(all_x)
        y = uconcatenate(all_y)
        z = uconcatenate(all_z)

        self._setup_particles(x,y,z)
Example #34
    def __init__(self,
                 ds,
                 data_source,
                 num_particles,
                 field_list,
                 density_field="density"):
        r"""
        Generate particles based on a density field.

        Parameters
        ----------
        ds : `Dataset`
            The dataset which will serve as the base for these particles.
        data_source : `yt.data_objects.data_containers.YTSelectionContainer`
            The data source containing the density field.
        num_particles : int
            The number of particles to be generated
        field_list : list of strings
            A list of particle fields
        density_field : string, optional
            A density field which will serve as the distribution function for the
            particle positions. Theoretically, this could be any 'per-volume' field. 
            
        Examples
        --------
        >>> sphere = ds.sphere(ds.domain_center, 0.5)
        >>> num_p = 100000
        >>> fields = ["particle_position_x","particle_position_y",
        >>>           "particle_position_z",
        >>>           "particle_density","particle_temperature"]
        >>> particles = WithDensityParticleGenerator(ds, sphere, num_particles,
        >>>                                          fields, density_field='Dark_Matter_Density')
        """

        ParticleGenerator.__init__(self, ds, num_particles, field_list)

        num_cells = len(data_source["x"].flat)
        max_mass = (data_source[density_field] *
                    data_source["cell_volume"]).max()
        num_particles_left = num_particles
        all_x = []
        all_y = []
        all_z = []

        pbar = get_pbar("Generating Particles", num_particles)
        tot_num_accepted = int(0)

        while num_particles_left > 0:

            m = np.random.uniform(high=1.01 * max_mass,
                                  size=num_particles_left)
            idxs = np.random.random_integers(low=0,
                                             high=num_cells - 1,
                                             size=num_particles_left)
            m_true = (data_source[density_field] *
                      data_source["cell_volume"]).flat[idxs]
            accept = m <= m_true
            num_accepted = accept.sum()
            accepted_idxs = idxs[accept]

            xpos = data_source["x"].flat[accepted_idxs] + \
                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
                   data_source["dx"].flat[accepted_idxs]
            ypos = data_source["y"].flat[accepted_idxs] + \
                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
                   data_source["dy"].flat[accepted_idxs]
            zpos = data_source["z"].flat[accepted_idxs] + \
                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
                   data_source["dz"].flat[accepted_idxs]

            all_x.append(xpos)
            all_y.append(ypos)
            all_z.append(zpos)

            num_particles_left -= num_accepted
            tot_num_accepted += num_accepted
            pbar.update(tot_num_accepted)

        pbar.finish()

        x = uconcatenate(all_x)
        y = uconcatenate(all_y)
        z = uconcatenate(all_z)

        self._setup_particles(x, y, z)
Example #35
    def __call__(self, data_source, parameters):

        ds = data_source.ds

        exp_time = parameters["FiducialExposureTime"]
        area = parameters["FiducialArea"]
        redshift = parameters["FiducialRedshift"]
        D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
        dist_fac = 1.0 / (4. * np.pi * D_A.value * D_A.value *
                          (1. + redshift)**2)
        src_ctr = parameters["center"]

        my_kT_min, my_kT_max = data_source.quantities.extrema("kT")

        self.spectral_model.prepare_spectrum(redshift)
        emid = self.spectral_model.emid
        ebins = self.spectral_model.ebins
        nchan = len(emid)

        citer = data_source.chunks([], "io")

        photons = {}
        photons["x"] = []
        photons["y"] = []
        photons["z"] = []
        photons["vx"] = []
        photons["vy"] = []
        photons["vz"] = []
        photons["dx"] = []
        photons["Energy"] = []
        photons["NumberOfPhotons"] = []

        spectral_norm = area.v * exp_time.v * dist_fac

        tot_num_cells = data_source.ires.shape[0]

        pbar = get_pbar("Generating photons ", tot_num_cells)

        cell_counter = 0

        for chunk in parallel_objects(citer):

            kT = chunk["kT"].v
            num_cells = len(kT)
            if num_cells == 0:
                continue
            vol = chunk["cell_volume"].in_cgs().v
            EM = (chunk["density"] / mp).in_cgs().v**2
            EM *= 0.5 * (1. + self.X_H) * self.X_H * vol

            if isinstance(self.Zmet, string_types):
                metalZ = chunk[self.Zmet].v
            else:
                metalZ = self.Zmet * np.ones(num_cells)

            idxs = np.argsort(kT)

            kT_bins = np.linspace(kT_min,
                                  max(my_kT_max.v, kT_max),
                                  num=n_kT + 1)
            dkT = kT_bins[1] - kT_bins[0]
            kT_idxs = np.digitize(kT[idxs], kT_bins)
            kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1
            bcounts = np.bincount(kT_idxs).astype("int")
            bcounts = bcounts[bcounts > 0]
            n = int(0)
            bcell = []
            ecell = []
            for bcount in bcounts:
                bcell.append(n)
                ecell.append(n + bcount)
                n += bcount
            kT_idxs = np.unique(kT_idxs)

            cell_em = EM[idxs] * spectral_norm

            number_of_photons = np.zeros(num_cells, dtype="uint64")
            energies = np.zeros(self.photons_per_chunk)

            start_e = 0
            end_e = 0

            for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):

                kT = kT_bins[ikT] + 0.5 * dkT

                n_current = iend - ibegin

                cem = cell_em[ibegin:iend]

                cspec, mspec = self.spectral_model.get_spectrum(kT)

                tot_ph_c = cspec.d.sum()
                tot_ph_m = mspec.d.sum()

                u = self.prng.uniform(size=n_current)

                cell_norm_c = tot_ph_c * cem
                cell_norm_m = tot_ph_m * metalZ[ibegin:iend] * cem
                cell_norm = np.modf(cell_norm_c + cell_norm_m)
                cell_n = np.uint64(cell_norm[1]) + np.uint64(cell_norm[0] >= u)

                number_of_photons[ibegin:iend] = cell_n

                end_e += int(cell_n.sum())

                if end_e > self.photons_per_chunk:
                    raise RuntimeError(
                        "Number of photons generated for this chunk " +
                        "exceeds photons_per_chunk (%d)! " %
                        self.photons_per_chunk + "Increase photons_per_chunk!")

                if self.method == "invert_cdf":
                    cumspec_c = np.cumsum(cspec.d)
                    cumspec_m = np.cumsum(mspec.d)
                    cumspec_c = np.insert(cumspec_c, 0, 0.0)
                    cumspec_m = np.insert(cumspec_m, 0, 0.0)

                ei = start_e
                for cn, Z in zip(number_of_photons[ibegin:iend],
                                 metalZ[ibegin:iend]):
                    if cn == 0: continue
                    # The rather verbose form of the few next statements is a
                    # result of code optimization and shouldn't be changed
                    # without checking for performance degradation. See
                    # https://bitbucket.org/yt_analysis/yt/pull-requests/1766
                    # for details.
                    if self.method == "invert_cdf":
                        cumspec = cumspec_c
                        cumspec += Z * cumspec_m
                        norm_factor = 1.0 / cumspec[-1]
                        cumspec *= norm_factor
                        randvec = self.prng.uniform(size=cn)
                        randvec.sort()
                        cell_e = np.interp(randvec, cumspec, ebins)
                    elif self.method == "accept_reject":
                        tot_spec = cspec.d
                        tot_spec += Z * mspec.d
                        norm_factor = 1.0 / tot_spec.sum()
                        tot_spec *= norm_factor
                        eidxs = self.prng.choice(nchan, size=cn, p=tot_spec)
                        cell_e = emid[eidxs]
                    energies[int(ei):int(ei + cn)] = cell_e
                    cell_counter += 1
                    pbar.update(cell_counter)
                    ei += cn

                start_e = end_e

            active_cells = number_of_photons > 0
            idxs = idxs[active_cells]

            photons["NumberOfPhotons"].append(number_of_photons[active_cells])
            photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
            photons["x"].append(
                (chunk["x"][idxs] - src_ctr[0]).in_units("kpc"))
            photons["y"].append(
                (chunk["y"][idxs] - src_ctr[1]).in_units("kpc"))
            photons["z"].append(
                (chunk["z"][idxs] - src_ctr[2]).in_units("kpc"))
            photons["vx"].append(chunk["velocity_x"][idxs].in_units("km/s"))
            photons["vy"].append(chunk["velocity_y"][idxs].in_units("km/s"))
            photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
            photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))

        pbar.finish()

        for key in photons:
            if len(photons[key]) > 0:
                photons[key] = uconcatenate(photons[key])
            elif key == "NumberOfPhotons":
                photons[key] = np.array([])
            else:
                photons[key] = YTArray([], photon_units[key])

        mylog.info("Number of photons generated: %d" %
                   int(np.sum(photons["NumberOfPhotons"])))
        mylog.info("Number of cells with photons: %d" % len(photons["x"]))

        self.spectral_model.cleanup_spectrum()

        return photons
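
The expected photon count per cell (cell_norm above) is rarely an integer, so the loop splits it with np.modf and promotes the fractional remainder to one extra photon with exactly that probability, which preserves the mean. A self-contained sketch of that draw, with a made-up array of expected counts:

import numpy as np

prng = np.random.RandomState(25)

expected = np.array([0.2, 1.7, 3.0, 10.4])   # expected photons per cell (made up)
frac, whole = np.modf(expected)              # fractional and integer parts
u = prng.uniform(size=expected.size)

# floor(expected) photons always, plus one more with probability `frac`,
# so the mean number of photons per cell equals `expected`.
n_photons = np.uint64(whole) + np.uint64(frac >= u)
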
Example #36
0
def test_particle_generator():
    # First generate our dataset
    domain_dims = (32, 32, 32)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4.0 * np.ones(domain_dims)
    fields = {
        "density": (dens, "code_mass/code_length**3"),
        "temperature": (temp, "K")
    }
    ug = load_uniform_grid(fields, domain_dims, 1.0)
    fo = [
        ic.BetaModelSphere(1.0, 0.1, 0.5, [0.5, 0.5, 0.5], {"density": (10.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](4.0)]
    ds = refine_amr(ug, rc, fo, 3)

    # Now generate particles from density

    field_list = [
        ("io", "particle_position_x"),
        ("io", "particle_position_y"),
        ("io", "particle_position_z"),
        ("io", "particle_index"),
        ("io", "particle_gas_density"),
    ]
    num_particles = 10000
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles,
                                              field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)

    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
    particles_per_grid1 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid1, particles1.NumberOfParticles)

    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert np.unique(tags).size == num_particles

    del tags

    # Set up a lattice of particles
    pdims = np.array([32, 32, 32])

    def new_indices():
        # We just add new indices onto the existing ones
        return np.arange((np.product(pdims))) + num_particles

    le = np.array([0.25, 0.25, 0.25])
    re = np.array([0.75, 0.75, 0.75])

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(field_dict)

    # Test lattice positions
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0], re[0], num=pdims[0], endpoint=True)
    ypred = np.linspace(le[1], re[1], num=pdims[1], endpoint=True)
    zpred = np.linspace(le[2], re[2], num=pdims[2], endpoint=True)

    assert_almost_equal(xpos, xpred)
    assert_almost_equal(ypos, ypred)
    assert_almost_equal(zpos, zpred)

    del xpos, ypos, zpos
    del xpred, ypred, zpred

    # Test the number of particles again
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid2,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)

    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid2,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)

    # Test the uniqueness of tags
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    assert_equal(tags, np.arange((np.product(pdims) + num_particles)))

    del tags

    # Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in field_list:
        pdata[field] = dd[field]

    # Test the "from-list" generator and particle field overwrite
    num_particles3 = num_particles + np.product(pdims)
    particles3 = FromListParticleGenerator(ds, num_particles3, pdata)
    particles3.apply_to_stream(overwrite=True)

    # Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid3,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)
    particles_per_grid2 = [
        len(grid["particle_position_z"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid3,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)
    assert_equal(particles_per_grid2, particles_per_grid3)

    # Test adding in particles with a different particle type

    num_star_particles = 20000
    pdata2 = {
        ("star", "particle_position_x"):
        np.random.uniform(size=num_star_particles),
        ("star", "particle_position_y"):
        np.random.uniform(size=num_star_particles),
        ("star", "particle_position_z"):
        np.random.uniform(size=num_star_particles),
    }

    particles4 = FromListParticleGenerator(ds,
                                           num_star_particles,
                                           pdata2,
                                           ptype="star")
    particles4.apply_to_stream()

    dd = ds.all_data()
    assert dd["star", "particle_position_x"].size == num_star_particles
    assert dd["io", "particle_position_x"].size == num_particles3
    assert dd["all", "particle_position_x"].size == \
        num_star_particles + num_particles3

    del pdata
    del pdata2
    del ds
    del particles1
    del particles2
    del particles4
    del fields
    del dens
    del temp
Example #37
0
def test_sloshing():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    prng = RandomState(0x4d3d3d3)

    ds = data_dir_load(gslr)
    A = 2000.
    exp_time = 1.0e4
    redshift = 0.1

    apec_model = TableApecModel(APEC, 0.1, 11.0, 10000)
    tbabs_model = TableAbsorbModel(TBABS, 0.1)

    sphere = ds.sphere("c", (0.1, "Mpc"))

    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3, prng=prng)
    photons1 = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                       thermal_model)

    return_photons = return_data(photons1.photons)

    tests = [GenericArrayTest(ds, return_photons, args=["photons"])]

    for a, r in zip(arfs, rmfs):
        arf = os.path.join(xray_data_dir, a)
        rmf = os.path.join(xray_data_dir, r)
        events1 = photons1.project_photons([1.0, -0.5, 0.2],
                                           responses=[arf, rmf],
                                           absorb_model=tbabs_model,
                                           convolve_energies=True, prng=prng)
        events1["xsky"]
        return_events = return_data(events1.events)

        tests.append(GenericArrayTest(ds, return_events, args=[a]))

    for test in tests:
        test_sloshing.__name__ = test.description
        yield test

    photons1.write_h5_file("test_photons.h5")
    events1.write_h5_file("test_events.h5")

    photons2 = PhotonList.from_file("test_photons.h5")
    events2 = EventList.from_h5_file("test_events.h5")

    convert_old_file(old_photon_file, "converted_photons.h5")
    convert_old_file(old_event_file, "converted_events.h5")

    PhotonList.from_file("converted_photons.h5")
    EventList.from_h5_file("converted_events.h5")

    for k in photons1.keys():
        if k == "Energy":
            arr1 = uconcatenate(photons1[k])
            arr2 = uconcatenate(photons2[k])
        else:
            arr1 = photons1[k]
            arr2 = photons2[k]
        assert_almost_equal(arr1, arr2)
    for k in events1.keys():
        assert_almost_equal(events1[k], events2[k])

    nevents = 0

    for i in range(4):
        events = photons1.project_photons([1.0, -0.5, 0.2],
                                          exp_time_new=0.25*exp_time,
                                          absorb_model=tbabs_model,
                                          prng=prng)
        events.write_h5_file("split_events_%d.h5" % i)
        nevents += len(events["xsky"])

    merge_files(["split_events_%d.h5" % i for i in range(4)],
                "merged_events.h5", add_exposure_times=True,
                clobber=True)

    merged_events = EventList.from_h5_file("merged_events.h5")
    assert len(merged_events["xsky"]) == nevents
    assert merged_events.parameters["ExposureTime"] == exp_time

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
    def __call__(self, data_source, parameters):

        ds = data_source.ds

        exp_time = parameters["FiducialExposureTime"]
        area = parameters["FiducialArea"]
        redshift = parameters["FiducialRedshift"]
        D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
        dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**2)
        src_ctr = parameters["center"]

        my_kT_min, my_kT_max = data_source.quantities.extrema("kT")

        self.spectral_model.prepare_spectrum(redshift)
        emid = self.spectral_model.emid
        ebins = self.spectral_model.ebins
        nchan = len(emid)

        citer = data_source.chunks([], "io")

        photons = {}
        photons["x"] = []
        photons["y"] = []
        photons["z"] = []
        photons["vx"] = []
        photons["vy"] = []
        photons["vz"] = []
        photons["dx"] = []
        photons["Energy"] = []
        photons["NumberOfPhotons"] = []

        spectral_norm = area.v*exp_time.v*dist_fac

        tot_num_cells = data_source.ires.shape[0]

        pbar = get_pbar("Generating photons ", tot_num_cells)

        cell_counter = 0

        for chunk in parallel_objects(citer):

            kT = chunk["kT"].v
            num_cells = len(kT)
            if num_cells == 0:
                continue
            vol = chunk["cell_volume"].in_cgs().v
            EM = (chunk["density"]/mp).v**2
            EM *= 0.5*(1.+self.X_H)*self.X_H*vol

            if isinstance(self.Zmet, string_types):
                metalZ = chunk[self.Zmet].v
            else:
                metalZ = self.Zmet*np.ones(num_cells)

            idxs = np.argsort(kT)

            kT_bins = np.linspace(kT_min, max(my_kT_max, kT_max), num=n_kT+1)
            dkT = kT_bins[1]-kT_bins[0]
            kT_idxs = np.digitize(kT[idxs], kT_bins)
            kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1
            bcounts = np.bincount(kT_idxs).astype("int")
            bcounts = bcounts[bcounts > 0]
            n = int(0)
            bcell = []
            ecell = []
            for bcount in bcounts:
                bcell.append(n)
                ecell.append(n+bcount)
                n += bcount
            kT_idxs = np.unique(kT_idxs)

            cell_em = EM[idxs]*spectral_norm

            number_of_photons = np.zeros(num_cells, dtype="uint64")
            energies = np.zeros(self.photons_per_chunk)

            start_e = 0
            end_e = 0

            for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):

                kT = kT_bins[ikT] + 0.5*dkT

                n_current = iend-ibegin

                cem = cell_em[ibegin:iend]

                cspec, mspec = self.spectral_model.get_spectrum(kT)

                tot_ph_c = cspec.d.sum()
                tot_ph_m = mspec.d.sum()

                u = np.random.random(size=n_current)

                cell_norm_c = tot_ph_c*cem
                cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem
                cell_norm = np.modf(cell_norm_c + cell_norm_m)
                cell_n = np.uint64(cell_norm[1]) + np.uint64(cell_norm[0] >= u)

                number_of_photons[ibegin:iend] = cell_n

                end_e += int(cell_n.sum())

                if end_e > self.photons_per_chunk:
                    raise RuntimeError("Number of photons generated for this chunk "+
                                       "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
                                       "Increase photons_per_chunk!")

                if self.method == "invert_cdf":
                    cumspec_c = np.cumsum(cspec.d)
                    cumspec_m = np.cumsum(mspec.d)
                    cumspec_c = np.insert(cumspec_c, 0, 0.0)
                    cumspec_m = np.insert(cumspec_m, 0, 0.0)

                ei = start_e
                for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
                    if cn == 0: continue
                    if self.method == "invert_cdf":
                        cumspec = cumspec_c + Z*cumspec_m
                        cumspec /= cumspec[-1]
                        randvec = np.random.uniform(size=cn)
                        randvec.sort()
                        cell_e = np.interp(randvec, cumspec, ebins)
                    elif self.method == "accept_reject":
                        tot_spec = cspec.d+Z*mspec.d
                        tot_spec /= tot_spec.sum()
                        eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
                        cell_e = emid[eidxs]
                    energies[ei:ei+cn] = cell_e
                    cell_counter += 1
                    pbar.update(cell_counter)
                    ei += cn

                start_e = end_e

            active_cells = number_of_photons > 0
            idxs = idxs[active_cells]

            photons["NumberOfPhotons"].append(number_of_photons[active_cells])
            photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
            photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
            photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc"))
            photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc"))
            photons["vx"].append(chunk["velocity_x"][idxs].in_units("km/s"))
            photons["vy"].append(chunk["velocity_y"][idxs].in_units("km/s"))
            photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
            photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))

        pbar.finish()

        for key in photons:
            if len(photons[key]) > 0:
                photons[key] = uconcatenate(photons[key])
            elif key == "NumberOfPhotons":
                photons[key] = np.array([])
            else:
                photons[key] = YTArray([], photon_units[key])

        mylog.info("Number of photons generated: %d" % int(np.sum(photons["NumberOfPhotons"])))
        mylog.info("Number of cells with photons: %d" % len(photons["x"]))

        return photons
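
With method == "invert_cdf", both versions of __call__ draw photon energies by inverse-transform sampling: the binned spectrum is cumulatively summed, normalized to 1, and sorted uniform deviates are mapped through np.interp onto the energy bin edges. A runnable sketch of that step, with a toy power-law spectrum standing in for cspec and mspec:

import numpy as np

prng = np.random.RandomState(26)

# Toy binned spectrum (photons per bin) on energy bin edges `ebins` (keV)
ebins = np.linspace(0.1, 10.0, 1001)
emid = 0.5 * (ebins[1:] + ebins[:-1])
spec = emid**-1.5                              # made-up power-law shape

# Cumulative distribution with a leading zero so it aligns with ebins
cumspec = np.insert(np.cumsum(spec), 0, 0.0)
cumspec /= cumspec[-1]

# Invert the CDF: sorted uniform deviates -> photon energies
randvec = prng.uniform(size=10000)
randvec.sort()
energies = np.interp(randvec, cumspec, ebins)  # keV
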
Example #40
0
def _cat_field(field, data):
    return uconcatenate([data[dep_type, field_name]
                         for dep_type in data.ds.particle_types_raw])
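
Example #40 is a closure used to build a combined particle field: it concatenates field_name (captured from the enclosing scope) across every raw particle type of the dataset. A plain-Python sketch of the same pattern, with np.concatenate standing in for yt's unit-aware uconcatenate and a dict standing in for the yt data object:

import numpy as np

def make_cat_field(field_name, particle_types):
    # Mirrors _cat_field above: concatenate `field_name` over all particle types.
    def _cat_field(field, data):
        return np.concatenate([data[ptype, field_name]
                               for ptype in particle_types])
    return _cat_field

# Toy stand-in for a yt data object keyed by (particle type, field name)
data = {("io", "particle_mass"): np.ones(5),
        ("star", "particle_mass"): np.full(3, 2.0)}

cat_mass = make_cat_field("particle_mass", ["io", "star"])
print(cat_mass(None, data))    # eight values: five 1.0s followed by three 2.0s
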
Example #41
0
def test_particle_generator():
    # First generate our pf
    domain_dims = (128, 128, 128)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4. * np.ones(domain_dims)
    fields = {
        "density": (dens, 'code_mass/code_length**3'),
        "temperature": (temp, 'K')
    }
    ug = load_uniform_grid(fields, domain_dims, 1.0)
    fo = [
        ic.BetaModelSphere(1.0, 0.1, 0.5, [0.5, 0.5, 0.5], {"density": (10.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](4.0)]
    ds = refine_amr(ug, rc, fo, 3)

    # Now generate particles from density

    field_list = [("io", "particle_position_x"), ("io", "particle_position_y"),
                  ("io", "particle_position_z"), ("io", "particle_index"),
                  ("io", "particle_gas_density")]
    num_particles = 1000000
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles,
                                              field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)

    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
    particles_per_grid1 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles

    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert (np.unique(tags).size == num_particles)
    # Set up a lattice of particles
    pdims = np.array([64, 64, 64])

    def new_indices():
        # We just add new indices onto the existing ones
        return np.arange((np.product(pdims))) + num_particles

    le = np.array([0.25, 0.25, 0.25])
    re = np.array([0.75, 0.75, 0.75])
    new_field_list = field_list + [("io", "particle_gas_temperature")]
    new_field_dict = {
        ("gas", "density"): ("io", "particle_gas_density"),
        ("gas", "temperature"): ("io", "particle_gas_temperature")
    }

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, new_field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(new_field_dict)

    #Test lattice positions
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0], re[0], num=pdims[0], endpoint=True)
    ypred = np.linspace(le[1], re[1], num=pdims[1], endpoint=True)
    zpred = np.linspace(le[2], re[2], num=pdims[2], endpoint=True)

    assert_almost_equal(xpos, xpred)
    assert_almost_equal(ypos, ypred)
    assert_almost_equal(zpos, zpred)

    #Test the number of particles again
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles + particles2.NumberOfParticles

    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles + particles2.NumberOfParticles

    #Test the uniqueness of tags
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    yield assert_equal, tags, np.arange((np.product(pdims) + num_particles))

    # Test that the old particles have zero for the new field
    old_particle_temps = [
        grid["particle_gas_temperature"][:particles_per_grid1[i]]
        for i, grid in enumerate(ds.index.grids)
    ]
    test_zeros = [
        np.zeros((particles_per_grid1[i]))
        for i, grid in enumerate(ds.index.grids)
    ]
    yield assert_equal, old_particle_temps, test_zeros

    #Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in new_field_list:
        pdata[field] = dd[field]

    #Test the "from-list" generator and particle field clobber
    particles3 = FromListParticleGenerator(ds,
                                           num_particles + np.product(pdims),
                                           pdata)
    particles3.apply_to_stream(clobber=True)

    #Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles + particles2.NumberOfParticles
    particles_per_grid2 = [
        len(grid["particle_position_z"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles + particles2.NumberOfParticles
Example #42
0
def test_sloshing():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    prng = RandomState(0x4d3d3d3)

    ds = data_dir_load(gslr)
    A = 2000.
    exp_time = 1.0e4
    redshift = 0.1

    sphere = ds.sphere("c", (0.1, "Mpc"))
    sphere.set_field_parameter("X_H", 0.75)

    thermal_model = ThermalSourceModel("apec", 0.1, 11.0, 10000, Zmet=0.3,
                                       thermal_broad=False, prng=prng)
    photons1 = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                           thermal_model)

    return_photons = return_data(photons1.photons)

    nphots = 0
    for i in range(4):
        phots = PhotonList.from_data_source(sphere, redshift, A, 0.25*exp_time,
                                            thermal_model)

        phots.write_h5_file("split_photons_%d.h5" % i)
        nphots += len(phots.photons["energy"])

    merge_files(["split_photons_%d.h5" % i for i in range(4)],
                "merged_photons.h5", add_exposure_times=True,
                overwrite=True)

    merged_photons = PhotonList.from_file("merged_photons.h5")
    assert len(merged_photons.photons["energy"]) == nphots
    assert merged_photons.parameters["fid_exp_time"] == exp_time

    events1 = photons1.project_photons([1.0,-0.5,0.2], [30., 45.],
                                       absorb_model="tbabs", nH=0.1, prng=prng)

    return_events = return_data(events1.events)

    events1.write_spectrum("test_events_spec.fits", 0.2, 10.0, 2000)

    f = pyfits.open("test_events_spec.fits")
    return_spec = return_data(f["SPECTRUM"].data["COUNTS"])
    f.close()

    events1.write_fits_image("test_events_img.fits", (20.0, "arcmin"), 
                             1024)

    f = pyfits.open("test_events_img.fits")
    return_img = return_data(f[0].data)
    f.close()

    tests = [GenericArrayTest(ds, return_photons, args=["photons"]),
             GenericArrayTest(ds, return_events, args=["events"]),
             GenericArrayTest(ds, return_spec, args=["spec"]),
             GenericArrayTest(ds, return_img, args=["img"])]

    for test in tests:
        test_sloshing.__name__ = test.description
        yield test

    photons1.write_h5_file("test_photons.h5")
    events1.write_h5_file("test_events.h5")
    events1.write_fits_file("test_events.fits", 20.0, 1024)

    photons2 = PhotonList.from_file("test_photons.h5")
    events2 = EventList.from_h5_file("test_events.h5")
    events3 = EventList.from_fits_file("test_events.fits")

    for k in photons1.keys():
        if k == "energy":
            arr1 = uconcatenate(photons1[k])
            arr2 = uconcatenate(photons2[k])
        else:
            arr1 = photons1[k]
            arr2 = photons2[k]
        assert_array_equal(arr1, arr2)
    for k in events2.keys():
        assert_array_equal(events1[k], events2[k])
        assert_allclose(events2[k], events3[k], rtol=1.0e-6)

    nevents = 0

    for i in range(4):
        events = photons1.project_photons([1.0, -0.5, 0.2], [30., 45.],
                                          absorb_model="tbabs", nH=0.1,
                                          prng=prng)
        events.write_h5_file("split_events_%d.h5" % i)
        nevents += len(events["xsky"])

    merge_files(["split_events_%d.h5" % i for i in range(4)],
                "merged_events.h5", add_exposure_times=True,
                overwrite=True)

    merged_events = EventList.from_h5_file("merged_events.h5")
    assert len(merged_events["xsky"]) == nevents
    assert merged_events.parameters["exp_time"] == 4.0*exp_time

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #43
0
def _light_cone_projection(my_slice,
                           field,
                           pixels,
                           weight_field=None,
                           save_image=False,
                           field_cuts=None):
    "Create a single projection to be added into the light cone stack."

    # We are just saving the projection object, so only the projection axis
    # needs to be considered since the lateral shifting and tiling occurs after
    # the projection object is made.
    # Likewise, only the box_depth_fraction needs to be considered.

    mylog.info("Making projection at z = %f from %s." % \
               (my_slice["redshift"], my_slice["filename"]))

    region_center = [0.5 * (my_slice["object"].domain_right_edge[q] +
                            my_slice["object"].domain_left_edge[q]) \
                         for q in range(my_slice["object"].dimensionality)]

    # 1. The Depth Problem
    # Use coordinate field cut in line of sight to cut projection to proper depth.
    if field_cuts is None:
        these_field_cuts = []
    else:
        these_field_cuts = field_cuts.copy()

    if (my_slice["box_depth_fraction"] < 1):
        axis = ("x", "y", "z")[my_slice["projection_axis"]]
        depthLeft = \
          my_slice["projection_center"][my_slice["projection_axis"]] \
            - 0.5 * my_slice["box_depth_fraction"]
        depthRight = \
          my_slice["projection_center"][my_slice["projection_axis"]] \
            + 0.5 * my_slice["box_depth_fraction"]
        if (depthLeft < 0):
            cut_mask = (
                "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= 0) & "
                " (obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= %f)) | "
                "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
                " (obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= 1))") % \
                (axis, axis, axis, axis, depthRight,
                 axis, axis, (depthLeft+1), axis, axis)
        elif (depthRight > 1):
            cut_mask = (
                "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= 0) & "
                "(obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= %f)) | "
                "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
                "(obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= 1))") % \
                (axis, axis, axis, axis, (depthRight-1),
                 axis, axis, depthLeft, axis, axis)
        else:
            cut_mask = (
                "(obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
                "(obj['index', '%s'] - 0.5*obj['index', '%s'] <= %f)") % \
                (axis, axis, depthLeft, axis, axis, depthRight)

        these_field_cuts.append(cut_mask)

    data_source = my_slice["object"].all_data()
    cut_region = data_source.cut_region(these_field_cuts)

    # Make projection.
    proj = my_slice["object"].proj(field,
                                   my_slice["projection_axis"],
                                   weight_field,
                                   center=region_center,
                                   data_source=cut_region)
    proj_field = proj.field[0]

    del data_source, cut_region

    # 2. The Tile Problem
    # Tile projection to specified width.

    # Original projection data.
    original_px = proj.field_data["px"].in_units("code_length").copy()
    original_py = proj.field_data["py"].in_units("code_length").copy()
    original_pdx = proj.field_data["pdx"].in_units("code_length").copy()
    original_pdy = proj.field_data["pdy"].in_units("code_length").copy()
    original_field = proj.field_data[proj_field].copy()
    original_weight_field = proj.field_data["weight_field"].copy()

    for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]:
        proj.field_data[my_field] = [proj.field_data[my_field]]

    # Copy original into offset positions to make tiles.
    for x in range(int(np.ceil(my_slice["box_width_fraction"]))):
        x = my_slice["object"].quan(x, "code_length")
        for y in range(int(np.ceil(my_slice["box_width_fraction"]))):
            y = my_slice["object"].quan(y, "code_length")
            if ((x + y) > 0):
                proj.field_data["px"] += [original_px + x]
                proj.field_data["py"] += [original_py + y]
                proj.field_data["pdx"] += [original_pdx]
                proj.field_data["pdy"] += [original_pdy]
                proj.field_data["weight_field"] += [original_weight_field]
                proj.field_data[proj_field] += [original_field]

    for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]:
        proj.field_data[my_field] = \
          my_slice["object"].arr(proj.field_data[my_field]).flatten()

    # Delete originals.
    del original_px
    del original_py
    del original_pdx
    del original_pdy
    del original_field
    del original_weight_field

    # 3. The Shift Problem
    # Shift projection by random x and y offsets.

    image_axes = np.roll(np.arange(3), -my_slice["projection_axis"])[1:]
    di_left_x = my_slice["object"].domain_left_edge[image_axes[0]]
    di_right_x = my_slice["object"].domain_right_edge[image_axes[0]]
    di_left_y = my_slice["object"].domain_left_edge[image_axes[1]]
    di_right_y = my_slice["object"].domain_right_edge[image_axes[1]]

    offset = my_slice["projection_center"].copy() * \
      my_slice["object"].domain_width
    offset = np.roll(offset, -my_slice["projection_axis"])[1:]

    # Shift x and y positions.
    proj.field_data["px"] -= offset[0]
    proj.field_data["py"] -= offset[1]

    # Wrap off-edge cells back around to other side (periodic boundary conditions).
    proj.field_data["px"][proj.field_data["px"] < di_left_x] += \
      np.ceil(my_slice["box_width_fraction"]) * di_right_x
    proj.field_data["py"][proj.field_data["py"] < di_left_y] += \
      np.ceil(my_slice["box_width_fraction"]) * di_right_y

    # After shifting, some cells have fractional coverage on both sides of the box.
    # Find those cells and make copies to be placed on the other side.

    # Cells hanging off the right edge.
    add_x_right = proj.field_data["px"] + 0.5 * proj.field_data["pdx"] > \
      np.ceil(my_slice["box_width_fraction"]) * di_right_x
    add_x_px = proj.field_data["px"][add_x_right]
    add_x_px -= np.ceil(my_slice["box_width_fraction"]) * di_right_x
    add_x_py = proj.field_data["py"][add_x_right]
    add_x_pdx = proj.field_data["pdx"][add_x_right]
    add_x_pdy = proj.field_data["pdy"][add_x_right]
    add_x_field = proj.field_data[proj_field][add_x_right]
    add_x_weight_field = proj.field_data["weight_field"][add_x_right]
    del add_x_right

    # Cells hanging off the left edge.
    add_x_left = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < \
      di_left_x
    add2_x_px = proj.field_data["px"][add_x_left]
    add2_x_px += np.ceil(my_slice["box_width_fraction"]) * di_right_x
    add2_x_py = proj.field_data["py"][add_x_left]
    add2_x_pdx = proj.field_data["pdx"][add_x_left]
    add2_x_pdy = proj.field_data["pdy"][add_x_left]
    add2_x_field = proj.field_data[proj_field][add_x_left]
    add2_x_weight_field = proj.field_data["weight_field"][add_x_left]
    del add_x_left

    # Cells hanging off the top edge.
    add_y_right = proj.field_data["py"] + 0.5 * proj.field_data["pdy"] > \
      np.ceil(my_slice["box_width_fraction"]) * di_right_y
    add_y_px = proj.field_data["px"][add_y_right]
    add_y_py = proj.field_data["py"][add_y_right]
    add_y_py -= np.ceil(my_slice["box_width_fraction"]) * di_right_y
    add_y_pdx = proj.field_data["pdx"][add_y_right]
    add_y_pdy = proj.field_data["pdy"][add_y_right]
    add_y_field = proj.field_data[proj_field][add_y_right]
    add_y_weight_field = proj.field_data["weight_field"][add_y_right]
    del add_y_right

    # Cells hanging off the bottom edge.
    add_y_left = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < \
      di_left_y
    add2_y_px = proj.field_data["px"][add_y_left]
    add2_y_py = proj.field_data["py"][add_y_left]
    add2_y_py += np.ceil(my_slice["box_width_fraction"]) * di_right_y
    add2_y_pdx = proj.field_data["pdx"][add_y_left]
    add2_y_pdy = proj.field_data["pdy"][add_y_left]
    add2_y_field = proj.field_data[proj_field][add_y_left]
    add2_y_weight_field = proj.field_data["weight_field"][add_y_left]
    del add_y_left

    # Add the hanging cells back to the projection data.
    proj.field_data["px"] = uconcatenate(
        [proj.field_data["px"], add_x_px, add_y_px, add2_x_px, add2_y_px])
    proj.field_data["py"] = uconcatenate(
        [proj.field_data["py"], add_x_py, add_y_py, add2_x_py, add2_y_py])
    proj.field_data["pdx"] = uconcatenate(
        [proj.field_data["pdx"], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
    proj.field_data["pdy"] = uconcatenate(
        [proj.field_data["pdy"], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
    proj.field_data[proj_field] = uconcatenate([
        proj.field_data[proj_field], add_x_field, add_y_field, add2_x_field,
        add2_y_field
    ])
    proj.field_data["weight_field"] = uconcatenate([
        proj.field_data["weight_field"], add_x_weight_field,
        add_y_weight_field, add2_x_weight_field, add2_y_weight_field
    ])

    # Delete original copies of hanging cells.
    del add_x_px, add_y_px, add2_x_px, add2_y_px
    del add_x_py, add_y_py, add2_x_py, add2_y_py
    del add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx
    del add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy
    del add_x_field, add_y_field, add2_x_field, add2_y_field
    del add_x_weight_field, add_y_weight_field, add2_x_weight_field, add2_y_weight_field

    # Tiles were made rounding up the width to the nearest integer.
    # Cut off the edges to get the specified width.
    # Cut in the x direction.
    cut_x = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < \
      di_right_x * my_slice["box_width_fraction"]
    proj.field_data["px"] = proj.field_data["px"][cut_x]
    proj.field_data["py"] = proj.field_data["py"][cut_x]
    proj.field_data["pdx"] = proj.field_data["pdx"][cut_x]
    proj.field_data["pdy"] = proj.field_data["pdy"][cut_x]
    proj.field_data[proj_field] = proj.field_data[proj_field][cut_x]
    proj.field_data["weight_field"] = proj.field_data["weight_field"][cut_x]
    del cut_x

    # Cut in the y direction.
    cut_y = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < \
      di_right_y * my_slice["box_width_fraction"]
    proj.field_data["px"] = proj.field_data["px"][cut_y]
    proj.field_data["py"] = proj.field_data["py"][cut_y]
    proj.field_data["pdx"] = proj.field_data["pdx"][cut_y]
    proj.field_data["pdy"] = proj.field_data["pdy"][cut_y]
    proj.field_data[proj_field] = proj.field_data[proj_field][cut_y]
    proj.field_data["weight_field"] = proj.field_data["weight_field"][cut_y]
    del cut_y

    # Create fixed resolution buffer to return back to the light cone object.
    # These buffers will be stacked together to make the light cone.
    frb = FixedResolutionBuffer(
        proj, (di_left_x, di_right_x * my_slice["box_width_fraction"],
               di_left_y, di_right_y * my_slice["box_width_fraction"]),
        (pixels, pixels),
        antialias=False)

    return frb
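
The shift step in the projection above subtracts a random offset from the projected positions and wraps anything that falls off the left edge back onto the right, enforcing periodic boundary conditions before the boundary-straddling cells are duplicated. A NumPy-only sketch of that wrap on a hypothetical unit-width image axis:

import numpy as np

rng = np.random.RandomState(27)

width = 1.0                         # assumed tiled box width in code_length
px = rng.uniform(0.0, width, 32)    # projected cell centers along one image axis
offset = 0.3 * width                # random shift, as in the light-cone slice

px = px - offset                    # shift positions
px[px < 0.0] += width               # wrap off-edge cells to the other side

assert np.all((px >= 0.0) & (px < width))
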
Example #44
0
    def generate_events(self, area, exp_time, angular_width,
                        source_model, sky_center, parameters=None,
                        velocity_fields=None, absorb_model=None,
                        nH=None, no_shifting=False, sigma_pos=None,
                        prng=None):
        """
        Generate projected events from a light cone simulation. 

        Parameters
        ----------
        area : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The collecting area to determine the number of events. If units are
            not specified, it is assumed to be in cm^2.
        exp_time : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The exposure time to determine the number of events. If units are
            not specified, it is assumed to be in seconds.
        angular_width : float, (value, unit) tuple, or :class:`~yt.units.yt_array.YTQuantity`
            The angular width of the light cone simulation. If units are not
            specified, it is assumed to be in degrees.
        source_model : :class:`~pyxsim.source_models.SourceModel`
            A source model used to generate the events.
        sky_center : array-like
            Center RA, Dec of the events in degrees.
        parameters : dict, optional
            A dictionary of parameters to be passed for the source model to use,
            if necessary.
        velocity_fields : list of fields
            The yt fields to use for the velocity. If not specified, the following will
            be assumed:
            ['velocity_x', 'velocity_y', 'velocity_z'] for grid datasets
            ['particle_velocity_x', 'particle_velocity_y', 'particle_velocity_z'] for particle datasets
        absorb_model : string or :class:`~pyxsim.spectral_models.AbsorptionModel` 
            A model for foreground galactic absorption, to simulate the absorption
            of events before being detected. This cannot be applied here if you 
            already did this step previously in the creation of the 
            :class:`~pyxsim.photon_list.PhotonList` instance. Known options for 
            strings are "wabs" and "tbabs".
        nH : float, optional
            The foreground column density in units of 10^22 cm^{-2}. Only used if
            absorption is applied.
        no_shifting : boolean, optional
            If set, the photon energies will not be Doppler shifted.
        sigma_pos : float, optional
            Apply a gaussian smoothing operation to the sky positions of the
            events. This may be useful when the binned events appear blocky due
            to their uniform distribution within simulation cells. However, this
            will move the events away from their originating position on the
            sky, and so may distort surface brightness profiles and/or spectra.
            Should probably only be used for visualization purposes. Supply a
            float here to smooth with a standard deviation with this fraction
            of the cell size. Default: None
        prng : integer or :class:`~numpy.random.RandomState` object
            A pseudo-random number generator. Typically will only be specified
            if you have a reason to generate the same set of random numbers, such as for a
            test. Default is to use the :mod:`numpy.random` module.
        """
        prng = parse_prng(prng)

        area = parse_value(area, "cm**2")
        exp_time = parse_value(exp_time, "s")
        aw = parse_value(angular_width, "deg")

        tot_events = defaultdict(list)

        for output in self.light_cone_solution:
            ds = load(output["filename"])
            ax = output["projection_axis"]
            c = output["projection_center"]*ds.domain_width + ds.domain_left_edge
            le = c.copy()
            re = c.copy()
            width = ds.quan(aw*output["box_width_per_angle"], "unitary").to("code_length")
            depth = ds.domain_width[ax].in_units("code_length")*output["box_depth_fraction"]
            le[ax] -= 0.5*depth
            re[ax] += 0.5*depth
            for off_ax in axes_lookup[ax]:
                le[off_ax] -= 0.5*width
                re[off_ax] += 0.5*width
            reg = ds.box(le, re)
            photons = PhotonList.from_data_source(reg, output['redshift'], area,
                                                  exp_time, source_model,
                                                  parameters=parameters,
                                                  center=c,
                                                  velocity_fields=velocity_fields,
                                                  cosmology=ds.cosmology)
            if sum(photons["num_photons"]) > 0:
                events = photons.project_photons("xyz"[ax], sky_center,
                                                 absorb_model=absorb_model, nH=nH,
                                                 no_shifting=no_shifting, 
                                                 sigma_pos=sigma_pos,
                                                 prng=prng)
                if events.num_events > 0:
                    tot_events["xsky"].append(events["xsky"])
                    tot_events["ysky"].append(events["ysky"])
                    tot_events["eobs"].append(events["eobs"])
                del events

            del photons

        parameters = {"exp_time": exp_time,
                      "area": area, 
                      "sky_center": YTArray(sky_center, "deg")}

        for key in tot_events:
            tot_events[key] = uconcatenate(tot_events[key])

        return EventList(tot_events, parameters)
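
For each output in the light cone, generate_events builds a box region around the projection center: the full width on the two image axes comes from the angular width converted through box_width_per_angle, and the extent along the projection axis comes from the box depth fraction. A small sketch of that bookkeeping, with made-up numbers in place of the dataset quantities:

import numpy as np

# Made-up values for a single light-cone output
domain_width = np.array([1.0, 1.0, 1.0])   # code_length
center = np.array([0.4, 0.6, 0.5])         # projection_center * domain_width
ax = 2                                      # projection axis
width = 0.3                                 # aw * box_width_per_angle, in code_length
depth = 0.2 * domain_width[ax]             # box_depth_fraction along the axis

le, re = center.copy(), center.copy()
le[ax] -= 0.5 * depth
re[ax] += 0.5 * depth
for off_ax in [a for a in range(3) if a != ax]:   # the two perpendicular axes
    le[off_ax] -= 0.5 * width
    re[off_ax] += 0.5 * width

# le and re would then be handed to ds.box(le, re) to select the slab
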
Example #45
0
def make_xrb_particles(data_source, age_field, scale_length, 
                       sfr_time_range=(1.0, "Gyr"), prng=None):
    r"""
    This routine generates an in-memory dataset composed of X-ray binary particles
    from an input data source containing star particles. 

    Parameters
    ----------
    data_source : :class:`~yt.data_objects.data_containers.YTSelectionContainer`
        The yt data source to obtain the data from, such as a sphere, box, disk, 
        etc.
    age_field : string or (type, name) field tuple
        The stellar age field. Must be in some kind of time units. 
    scale_length : string, (ftype, fname) tuple, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The radial length scale over which to scatter the XRB particles
        from their parent star particle. Can be the name of a smoothing
        length field for the stars, a (value, unit) tuple, or a YTQuantity.
    sfr_time_range : string, (ftype, fname) tuple, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`, optional
        The recent time range over which to calculate the star formation rate from
        the current time in the dataset. Default: 1.0 Gyr
    prng : integer or :class:`~numpy.random.RandomState` object 
        A pseudo-random number generator. Typically will only be specified
        if you have a reason to generate the same set of random numbers, such as for a
        test. Default is to use the :mod:`numpy.random` module.
    """
    prng = parse_prng(prng)

    ds = data_source.ds

    ptype = data_source._determine_fields(age_field)[0][0]

    t = data_source[age_field].to("Gyr")
    m = data_source[(ptype, "particle_mass")].to("Msun")

    sfr_time_range = parse_value(sfr_time_range, "Gyr")

    recent = t < sfr_time_range

    n_recent = recent.sum()

    if n_recent == 0:
        sfr = 0.0
    else:
        sfr = (m[recent].sum()/sfr_time_range).to("Msun/yr").v

    mylog.info("%d star particles were formed in the last " % n_recent +
               "%s for a SFR of %4.1f Msun/yr." % (sfr_time_range, sfr))

    mtot = m.sum()

    npart = m.size

    scale_field = None
    if isinstance(scale_length, tuple):
        if isinstance(scale_length[0], string_types):
            scale_field = scale_length
    elif isinstance(scale_length, string_types):
        scale_field = (ptype, scale_length)

    if scale_field is None:
        if isinstance(scale_length, tuple):
            scale = YTArray([scale_length[0]]*npart, scale_length[1])
        elif isinstance(scale_length, YTQuantity):
            scale = YTArray([scale_length]*npart)
        else:
            scale = YTArray([scale_length[0]]*npart, "kpc")
    else:
        scale = data_source[scale_length]

    scale = scale.to('kpc').d

    N_l = lmxb_cdf(Lcut)*mtot.v*1.0e-11
    N_h = hmxb_cdf(Lcut)*sfr

    N_all = N_l+N_h

    if N_all == 0.0:
        raise RuntimeError("There are no X-ray binaries to generate!")

    # Compute conversion factors from luminosity to count rate

    lmxb_factor = get_scale_factor(alpha_lmxb, emin_lmxb, emax_lmxb)
    hmxb_factor = get_scale_factor(alpha_hmxb, emin_hmxb, emax_hmxb)

    xp = []
    yp = []
    zp = []
    vxp = []
    vyp = []
    vzp = []
    lp = []
    rp = []
    ap = []

    if N_l > 0.0:

        F_l = np.zeros(nbins+1)
        for i in range(1, nbins+1):
            F_l[i] = lmxb_cdf(Lbins[i]) 
        F_l /= F_l[-1]
        invcdf_l = InterpolatedUnivariateSpline(F_l, logLbins)

        n_l = prng.poisson(lam=N_l*m/mtot)

        mylog.info("Number of low-mass X-ray binaries: %s" % n_l.sum())

        for i, n in enumerate(n_l):
            if n > 0:
                randvec = prng.uniform(size=n)
                l = YTArray(10**invcdf_l(randvec)*1.0e38, "erg/s")
                r = YTArray(l.v*lmxb_factor, "photons/s/keV")
                # Now convert output luminosities to bolometric
                l *= bc_lmxb
                x = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                y = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                z = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                x += data_source[ptype, "particle_position_x"][i].to("kpc")
                y += data_source[ptype, "particle_position_y"][i].to("kpc")
                z += data_source[ptype, "particle_position_z"][i].to("kpc")
                vx = YTArray([data_source[ptype, "particle_velocity_x"][i]]*n).to('km/s')
                vy = YTArray([data_source[ptype, "particle_velocity_y"][i]]*n).to('km/s')
                vz = YTArray([data_source[ptype, "particle_velocity_z"][i]]*n).to('km/s')
                xp.append(x)
                yp.append(y)
                zp.append(z)
                vxp.append(vx)
                vyp.append(vy)
                vzp.append(vz)
                lp.append(l)
                rp.append(r)
                ap.append(np.array([alpha_lmxb]*n))

    if N_h > 0.0:

        F_h = np.zeros(nbins+1)
        for i in range(1, nbins+1):
            F_h[i] = hmxb_cdf(Lbins[i])
        F_h /= F_h[-1]
        invcdf_h = InterpolatedUnivariateSpline(F_h, logLbins)

        n_h = prng.poisson(lam=N_h*m/mtot)

        mylog.info("Number of high-mass X-ray binaries: %s" % n_h.sum())

        for i, n in enumerate(n_h):
            if n > 0:
                randvec = prng.uniform(size=n)
                l = YTArray(10**invcdf_h(randvec)*1.0e38, "erg/s")
                r = YTArray(l.v*hmxb_factor, "photons/s/keV")
                # Now convert output luminosities to bolometric
                l *= bc_hmxb
                x = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                y = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                z = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                x += data_source[ptype, "particle_position_x"][i].to("kpc")
                y += data_source[ptype, "particle_position_y"][i].to("kpc")
                z += data_source[ptype, "particle_position_z"][i].to("kpc")
                vx = YTArray([data_source[ptype, "particle_velocity_x"][i]]*n).to('km/s')
                vy = YTArray([data_source[ptype, "particle_velocity_y"][i]]*n).to('km/s')
                vz = YTArray([data_source[ptype, "particle_velocity_z"][i]]*n).to('km/s')
                xp.append(x)
                yp.append(y)
                zp.append(z)
                vxp.append(vx)
                vyp.append(vy)
                vzp.append(vz)
                lp.append(l)
                rp.append(r)
                ap.append(np.array([alpha_hmxb]*n))

    xp = uconcatenate(xp)
    yp = uconcatenate(yp)
    zp = uconcatenate(zp)
    vxp = uconcatenate(vxp)
    vyp = uconcatenate(vyp)
    vzp = uconcatenate(vzp)
    lp = uconcatenate(lp)
    rp = uconcatenate(rp)
    ap = uconcatenate(ap)

    data = {"particle_position_x": (xp.d, str(xp.units)),
            "particle_position_y": (yp.d, str(yp.units)),
            "particle_position_z": (zp.d, str(zp.units)),
            "particle_velocity_x": (vxp.d, str(vxp.units)),
            "particle_velocity_y": (vyp.d, str(vyp.units)),
            "particle_velocity_z": (vzp.d, str(vzp.units)),
            "particle_luminosity": (lp.d, str(lp.units)),
            "particle_count_rate": (rp.d, str(rp.units)),
            "particle_spectral_index": ap}

    dle = ds.domain_left_edge.to("kpc").v
    dre = ds.domain_right_edge.to("kpc").v

    bbox = np.array([[dle[i], dre[i]] for i in range(3)])

    new_ds = load_particles(data, bbox=bbox, length_unit="kpc",
                            time_unit="Myr", mass_unit="Msun", 
                            velocity_unit="km/s")

    return new_ds
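
make_xrb_particles assigns binary luminosities by inverse-transform sampling as well, but through a spline: the cumulative luminosity function is tabulated on logarithmic bins, normalized, and an InterpolatedUnivariateSpline of log L versus CDF value serves as the inverse CDF. A self-contained sketch of the same trick, with a toy cumulative distribution in place of lmxb_cdf/hmxb_cdf:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

prng = np.random.RandomState(28)

# Toy luminosity grid (L in units of 1.0e38 erg/s) and a made-up CDF shape;
# in the routine above these come from lmxb_cdf/hmxb_cdf evaluated on Lbins.
nbins = 64
logLbins = np.linspace(-2.0, 2.0, nbins + 1)
Lbins = 10.0**logLbins
F = 1.0 - (Lbins / Lbins[0])**-0.6     # monotonically increasing with L
F /= F[-1]                             # normalize so F runs from 0 to 1

# Spline of log L as a function of the CDF value: the inverse CDF
invcdf = InterpolatedUnivariateSpline(F, logLbins)

# Draw luminosities for, say, 1000 binaries
randvec = prng.uniform(size=1000)
lum = 10.0**invcdf(randvec) * 1.0e38   # erg/s
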