Code example #1
 def recv_array(self, source, tag=0):
     metadata = self.comm.recv(source=source, tag=tag)
     dt, ne = metadata[:2]
     if ne is None and dt is None:
         return self.comm.recv(source=source, tag=tag)
     arr = np.empty(ne, dtype=dt)
     if len(metadata) == 4:
         registry = UnitRegistry(lut=metadata[3], add_default_symbols=False)
         arr = YTArray(arr, metadata[2], registry=registry)
     tmp = arr.view(self.__tocast)
     self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
     return arr
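For context, the receiver above first gets a metadata tuple and then the raw bytes. Below is a minimal sketch of the matching send side, assuming the same byte-level cast (`self.__tocast`, i.e. "c") and the metadata layout implied by the receiver (dtype, size, unit string, unit-registry LUT); it is an inference from the code above, not verified source.

 def send_array(self, arr, dest, tag=0):
     # Non-array payloads fall back to pickling, signalled by the
     # (None, None) metadata tuple the receiver checks for.
     if not isinstance(arr, np.ndarray):
         self.comm.send((None, None), dest=dest, tag=tag)
         self.comm.send(arr, dest=dest, tag=tag)
         return
     metadata = (arr.dtype, arr.size)
     if isinstance(arr, YTArray):
         metadata += (str(arr.units), arr.units.registry.lut)
     self.comm.send(metadata, dest=dest, tag=tag)
     self.comm.Send([arr.view(self.__tocast), MPI.CHAR], dest=dest, tag=tag)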
Code example #2
def test_copy():
    quan = YTQuantity(1, 'g')
    arr = YTArray([1, 2, 3], 'cm')

    yield assert_equal, copy.copy(quan), quan
    yield assert_array_equal, copy.copy(arr), arr

    yield assert_equal, copy.deepcopy(quan), quan
    yield assert_array_equal, copy.deepcopy(arr), arr

    yield assert_equal, quan.copy(), quan
    yield assert_array_equal, arr.copy(), arr

    yield assert_equal, np.copy(quan), quan
    yield assert_array_equal, np.copy(arr), arr
Code example #3
File: point_sources.py Project: jzuhone/pyxsim
def make_point_sources(area, exp_time, positions, sky_center,
                       spectra, prng=None):
    r"""
    Create a new :class:`~pyxsim.event_list.EventList` which contains
    point sources.

    Parameters
    ----------
    area : float, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The collecting area to determine the number of events. If units are
        not specified, it is assumed to be in cm^2.
    exp_time : float, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The exposure time to determine the number of events. If units are
        not specified, it is assumed to be in seconds.
    positions : array of source positions, shape 2xN
        The positions of the point sources in RA, Dec, where N is the
        number of point sources. Coordinates should be in degrees.
    sky_center : array-like
        Center RA, Dec of the events in degrees.
    spectra : list (size N) of :class:`~soxs.spectra.Spectrum` objects
        The spectra for the point sources, where N is the number 
        of point sources. Assumed to be in the observer frame.
    prng : integer or :class:`~numpy.random.RandomState` object 
        A pseudo-random number generator. Typically will only be specified
        if you have a reason to generate the same set of random numbers, such as for a
        test. Default is to use the :mod:`numpy.random` module.
    """
    prng = parse_prng(prng)

    spectra = ensure_list(spectra)
    positions = ensure_list(positions)

    area = parse_value(area, "cm**2")
    exp_time = parse_value(exp_time, "s")

    t_exp = exp_time.value/comm.size

    x = []
    y = []
    e = []

    for pos, spectrum in zip(positions, spectra):
        eobs = spectrum.generate_energies(t_exp, area.value, prng=prng)
        ne = eobs.size
        x.append(YTArray([pos[0]] * ne, "deg"))
        y.append(YTArray([pos[1]] * ne, "deg"))
        e.append(YTArray.from_astropy(eobs))

    parameters = {"sky_center": YTArray(sky_center, "degree"),
                  "exp_time": exp_time,
                  "area": area}

    events = {}
    events["xsky"] = uconcatenate(x)
    events["ysky"] = uconcatenate(y)
    events["eobs"] = uconcatenate(e)

    return EventList(events, parameters)
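A hedged usage sketch for make_point_sources; the positions, area, exposure, and the soxs Spectrum constructor below are illustrative assumptions, not values from the source.

from soxs import Spectrum  # assumption: spectra are soxs Spectrum objects

# One power-law spectrum shared by two hypothetical sources.
spec = Spectrum.from_powerlaw(2.0, 0.0, 1.0e-4, emin=0.1, emax=10.0)
positions = [(30.01, 45.02), (29.99, 44.98)]  # (RA, Dec) in degrees
sky_center = (30.0, 45.0)
events = make_point_sources((3000.0, "cm**2"), (100.0, "ks"),
                            positions, sky_center, [spec, spec])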
Code example #4
def test_convenience():

    arr = YTArray([1, 2, 3], 'cm')

    yield assert_equal, arr.unit_quantity, YTQuantity(1, 'cm')
    yield assert_equal, arr.uq, YTQuantity(1, 'cm')
    yield assert_isinstance, arr.unit_quantity, YTQuantity
    yield assert_isinstance, arr.uq, YTQuantity

    yield assert_array_equal, arr.unit_array, YTArray(np.ones_like(arr), 'cm')
    yield assert_array_equal, arr.ua, YTArray(np.ones_like(arr), 'cm')
    yield assert_isinstance, arr.unit_array, YTArray
    yield assert_isinstance, arr.ua, YTArray

    yield assert_array_equal, arr.ndview, arr.view(np.ndarray)
    yield assert_array_equal, arr.d, arr.view(np.ndarray)
    yield assert_true, arr.ndview.base is arr.base
    yield assert_true, arr.d.base is arr.base

    yield assert_array_equal, arr.value, np.array(arr)
    yield assert_array_equal, arr.v, np.array(arr)
Code example #5
 def alltoallv_array(self, send, total_size, offsets, sizes):
     if len(send.shape) > 1:
         recv = []
         for i in range(send.shape[0]):
             recv.append(self.alltoallv_array(send[i,:].copy(),
                                              total_size, offsets, sizes))
         recv = np.array(recv)
         return recv
     offset = offsets[self.comm.rank]
     tmp_send = send.view(self.__tocast)
     recv = np.empty(total_size, dtype=send.dtype)
     if isinstance(send, YTArray):
         # We assume send.units is consistent with the units
         # on the receiving end.
         recv = YTArray(recv, send.units)
     recv[offset:offset+send.size] = send[:]
     dtr = send.dtype.itemsize // tmp_send.dtype.itemsize  # > 1
     roff = [off * dtr for off in offsets]
     rsize = [siz * dtr for siz in sizes]
     tmp_recv = recv.view(self.__tocast)
     self.comm.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
                               (tmp_recv, (rsize, roff), MPI.CHAR))
     return recv
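The key trick in alltoallv_array is viewing every buffer as raw bytes so a single CHAR-typed Allgatherv can ship any dtype; a standalone numpy illustration of the byte view and the itemsize scaling:

import numpy as np

arr = np.arange(4, dtype="float64")
raw = arr.view("c")                  # reinterpret the buffer as single bytes
assert raw.size == arr.size * arr.dtype.itemsize   # 4 * 8 = 32
# The offsets/sizes handed to Allgatherv are scaled by this same ratio (dtr).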
Code example #6
def test_comparisons():
    """
    Test numpy ufunc comparison operators for unit consistency.

    """
    from yt.units.yt_array import YTArray

    a1 = YTArray([1, 2, 3], 'cm')
    a2 = YTArray([2, 1, 3], 'cm')
    a3 = YTArray([.02, .01, .03], 'm')

    ops = (
        np.less,
        np.less_equal,
        np.greater,
        np.greater_equal,
        np.equal,
        np.not_equal
    )

    answers = (
        [True, False, False],
        [True, False, True],
        [False, True, False],
        [False, True, True],
        [False, False, True],
        [True, True, False],
    )

    for op, answer in zip(ops, answers):
        yield operate_and_compare, a1, a2, op, answer

    for op in ops:
        yield assert_raises, YTUfuncUnitError, op, a1, a3

    for op, answer in zip(ops, answers):
        yield operate_and_compare, a1, a3.in_units('cm'), op, answer
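operate_and_compare is a small helper from the test module; a plausible definition consistent with its use here (an assumption mirroring yt's test utilities):

def operate_and_compare(a, b, op, answer):
    # Apply the ufunc and compare the result elementwise against the answer.
    assert_array_equal(op(a, b), answer)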
Code example #7
 def recv_array(self, source, tag=0):
     metadata = self.comm.recv(source=source, tag=tag)
     dt, ne = metadata[:2]
     if ne is None and dt is None:
         return self.comm.recv(source=source, tag=tag)
     arr = np.empty(ne, dtype=dt)
     if len(metadata) == 5:
         registry = UnitRegistry(lut=metadata[3], add_default_symbols=False)
         if metadata[-1] == "ImageArray":
             arr = ImageArray(arr, units=metadata[2],
                              registry=registry)
         else:
             arr = YTArray(arr, metadata[2], registry=registry)
     tmp = arr.view(self.__tocast)
     self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
     return arr
Code example #8
 def _particle_spherical_position_phi(field, data):
     """
     Phi component of the particles' position vectors in spherical coords
     on the provided field parameters for 'normal', 'center', and 
     'bulk_velocity', 
     """
     normal = data.get_field_parameter('normal')
     center = data.get_field_parameter('center')
     bv = data.get_field_parameter("bulk_velocity")
     pos = spos
     pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
     theta = get_sph_theta(pos, center)
     phi = get_sph_phi(pos, center)
     pos = pos - np.reshape(center, (3, 1))
     sphp = get_sph_phi_component(pos, phi, normal)
     return sphp
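The function above relies on spos and ptype from the enclosing field-definition scope; a plausible minimal setup (an assumption, not the exact source):

ptype = "all"                    # particle type the field is registered for
spos = "particle_position_%s"    # expanded per axis via `spos % ax`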
Code example #9
    def _photon_emissivity_field(field, data):
        dd = {
            "log_nH": np.log10(data[ftype, "H_nuclei_density"]),
            "log_T": np.log10(data[ftype, "temperature"])
        }

        my_emissivity = np.power(10, emp_0(dd))
        if metallicity is not None:
            if isinstance(metallicity, DerivedField):
                my_Z = data[metallicity.name].to("Zsun")
            else:
                my_Z = metallicity
            my_emissivity += my_Z * np.power(10, emp_Z(dd))

        return data[ftype, "norm_field"] * \
            YTArray(my_emissivity, "photons*cm**3/s")
Code example #10
 def fcoords_vertex(self):
     nodes_per_elem = self.dobj.index.meshes[0].connectivity_indices.shape[1]
     dim = self.dobj.ds.dimensionality
     ci = np.empty((self.data_size, nodes_per_elem, dim), dtype='float64')
     ci = YTArray(ci,
                  input_units="code_length",
                  registry=self.dobj.ds.unit_registry)
     if self.data_size == 0: return ci
     ind = 0
     for obj in self.objs:
         c = obj.select_fcoords_vertex(self.dobj)
         if c.shape[0] == 0: continue
         ci[ind:ind + c.shape[0], :, :] = c
         ind += c.shape[0]
     return ci
Code example #11
    def from_file(cls, filename):
        """
        Generate a FITSImageData instance from one previously written to 
        disk.

        Parameters
        ----------
        filename : string
            The name of the file to open.
        """
        f = _astropy.pyfits.open(filename)
        data = {}
        for hdu in f:
            data[hdu.header["btype"]] = YTArray(hdu.data, hdu.header["bunit"])
        f.close()
        return cls(data, wcs=_astropy.pywcs.WCS(header=hdu.header))
Code example #12
    def _photon_emissivity_field(field, data):
        dd = {
            "log_nH": np.log10(data["gas", "H_number_density"]),
            "log_T": np.log10(data["gas", "temperature"])
        }

        my_emissivity = np.power(10, emp_0(dd))
        if emp_Z is not None:
            if with_metals:
                my_Z = data["gas", "metallicity"]
            elif constant_metallicity is not None:
                my_Z = constant_metallicity
            my_emissivity += my_Z * np.power(10, emp_Z(dd))

        return data["gas","H_number_density"]**2 * \
            YTArray(my_emissivity, "photons*cm**3/s")
Code example #13
 def __init__(self, lambda_min, lambda_max, n_lambda):
     self.n_lambda = int(n_lambda)
     # lambda, flux, and tau are wavelength, flux, and optical depth
     self.lambda_min = lambda_min
     self.lambda_max = lambda_max
     self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max,
                                 n_lambda), "angstrom")
     self.tau_field = None
     self.flux_field = None
     self.absorbers_list = None
     # a dictionary that will store spectral quantities for each index in the light ray
     self.line_observables_dict = None
     self.bin_width = YTQuantity((lambda_max - lambda_min) /
                                 float(n_lambda - 1), "angstrom")
     self.line_list = []
     self.continuum_list = []
     self.snr = 100  # default signal to noise ratio for error estimation
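The bin width is simply the spacing of the linearly spaced wavelength grid; a quick worked check of the formula:

import numpy as np

# lambda_min=1000, lambda_max=2000, n_lambda=101 gives samples spaced
# (2000 - 1000) / (101 - 1) = 10 angstrom apart, matching bin_width above.
lam = np.linspace(1000.0, 2000.0, 101)
assert np.isclose(lam[1] - lam[0], 10.0)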
Code example #14
def get_sph_theta_component(vectors, theta, phi, normal):
    # The theta component of a vector is the vector dotted with thetahat
    normal = normalize_vector(normal)
    (zprime, xprime, yprime) = ortho_find(normal)

    res_xprime = resize_vector(xprime, vectors)
    res_yprime = resize_vector(yprime, vectors)
    res_zprime = resize_vector(zprime, vectors)

    tile_shape = [1] + list(vectors.shape)[1:]
    Jx, Jy, Jz = (YTArray(np.tile(rprime, tile_shape), "")
                  for rprime in (res_xprime, res_yprime, res_zprime))

    thetahat = (Jx * np.cos(theta) * np.cos(phi) +
                Jy * np.cos(theta) * np.sin(phi) - Jz * np.sin(theta))

    return np.sum(vectors * thetahat, axis=0)
Code example #15
def get_params(ds):
    return dict(
        axis=0,
        center=YTArray((0.0, 0.0, 0.0), "cm", registry=ds.unit_registry),
        bulk_velocity=YTArray((0.0, 0.0, 0.0), "cm/s", registry=ds.unit_registry),
        bulk_magnetic_field=YTArray((0.0, 0.0, 0.0), "G", registry=ds.unit_registry),
        normal=YTArray((0.0, 0.0, 1.0), "", registry=ds.unit_registry),
        cp_x_vec=YTArray((1.0, 0.0, 0.0), "", registry=ds.unit_registry),
        cp_y_vec=YTArray((0.0, 1.0, 0.0), "", registry=ds.unit_registry),
        cp_z_vec=YTArray((0.0, 0.0, 1.0), "", registry=ds.unit_registry),
        omega_baryon=0.04,
        observer_redshift=0.0,
        source_redshift=3.0,
        virial_radius=YTQuantity(1.0, "cm"),
    )
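A hedged sketch of how such a parameter dict is consumed: yt data objects expose set_field_parameter, so the values can be attached to a container before evaluating fields that read 'center', 'normal', and the rest (the fake dataset here is an assumption for illustration):

from yt.testing import fake_random_ds

ds = fake_random_ds(16)
dd = ds.all_data()
for name, val in get_params(ds).items():
    dd.set_field_parameter(name, val)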
Code example #16
    def _emissivity_field(field, data):
        with np.errstate(all='ignore'):
            dd = {"log_nH": np.log10(data["gas", "H_nuclei_density"]),
                  "log_T": np.log10(data["gas", "temperature"])}

        my_emissivity = np.power(10, em_0(dd))
        if metallicity is not None:
            if isinstance(metallicity, DerivedField):
                my_Z = data[metallicity.name]
            else:
                my_Z = metallicity
            my_emissivity += my_Z * np.power(10, em_Z(dd))

        my_emissivity[np.isnan(my_emissivity)] = 0

        return data["gas","H_nuclei_density"]**2 * \
            YTArray(my_emissivity, "erg*cm**3/s")
Code example #17
 def mpi_bcast(self, data, root=0):
     # The second check below makes sure that we know how to communicate
     # this type of array. Otherwise, we'll pickle it.
     if isinstance(data, np.ndarray) and get_mpi_type(
             data.dtype) is not None:
         if self.comm.rank == root:
             if isinstance(data, YTArray):
                 info = (
                     data.shape,
                     data.dtype,
                     str(data.units),
                     data.units.registry.lut,
                 )
                 if isinstance(data, ImageArray):
                     info += ("ImageArray", )
                 else:
                     info += ("YTArray", )
             else:
                 info = (data.shape, data.dtype)
         else:
             info = ()
         info = self.comm.bcast(info, root=root)
         if self.comm.rank != root:
             if len(info) == 5:
                 registry = UnitRegistry(lut=info[3],
                                         add_default_symbols=False)
                 if info[-1] == "ImageArray":
                     data = ImageArray(
                         np.empty(info[0], dtype=info[1]),
                         units=info[2],
                         registry=registry,
                     )
                 else:
                     data = YTArray(np.empty(info[0], dtype=info[1]),
                                    info[2],
                                    registry=registry)
             else:
                 data = np.empty(info[0], dtype=info[1])
         mpi_type = get_mpi_type(info[1])
         self.comm.Bcast([data, mpi_type], root=root)
         return data
     else:
         # Use pickled methods.
         data = self.comm.bcast(data, root=root)
         return data
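Every rank must call mpi_bcast; non-root ranks can pass a placeholder array, since the real shape, dtype, and units arrive through the broadcast metadata. A hedged usage sketch on a communicator wrapper `comm` under an MPI run:

import numpy as np

if comm.rank == 0:
    data = YTArray(np.arange(8.0), "cm")
else:
    data = np.empty(0)  # placeholder; rebuilt from the metadata tuple
data = comm.mpi_bcast(data, root=0)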
Code example #18
def test_yt_array_yt_quantity_ops():
    """
    Test operations that combine YTArray and YTQuantity
    """
    a = YTArray(range(10), 'cm')
    b = YTQuantity(5, 'g')

    assert_isinstance(a * b, YTArray)
    assert_isinstance(b * a, YTArray)

    assert_isinstance(a / b, YTArray)
    assert_isinstance(b / a, YTArray)

    assert_isinstance(a * a, YTArray)
    assert_isinstance(a / a, YTArray)

    assert_isinstance(b * b, YTQuantity)
    assert_isinstance(b / b, YTQuantity)
Code example #19
def test_h5_io():
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    ds = fake_random_ds(64, nprocs=1, length_unit=10)

    warr = ds.arr(np.random.random((256, 256)), 'code_length')

    warr.write_hdf5('test.h5')

    iarr = YTArray.from_hdf5('test.h5')

    yield assert_equal, warr, iarr
    yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Code example #20
def test_yt_array_yt_quantity_ops():
    """
    Test operations that combine YTArray and YTQuantity
    """
    a = YTArray(range(10), 'cm')
    b = YTQuantity(5, 'g')

    yield assert_isinstance, a * b, YTArray
    yield assert_isinstance, b * a, YTArray

    yield assert_isinstance, a / b, YTArray
    yield assert_isinstance, b / a, YTArray

    yield assert_isinstance, a * a, YTArray
    yield assert_isinstance, a / a, YTArray

    yield assert_isinstance, b * b, YTQuantity
    yield assert_isinstance, b / b, YTQuantity
Code example #21
def _flatten_dict_list(data, exceptions=None):
    """
    _flatten_dict_list(data, exceptions=None)

    Flatten the list of dicts into one dict.
    """

    if exceptions is None: exceptions = []
    new_data = {}
    for datum in data:
        for field in [field for field in datum.keys()
                      if field not in exceptions]:
            if field not in new_data:
                new_data[field] = []
            new_data[field].extend(datum[field])
    for field in new_data:
        new_data[field] = YTArray(new_data[field])
    return new_data
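An illustrative input/output pair for _flatten_dict_list (hypothetical data):

data = [{"x": [1, 2], "y": [10]}, {"x": [3]}]
flat = _flatten_dict_list(data)
# flat["x"] -> YTArray([1, 2, 3]); flat["y"] -> YTArray([10])
# (dimensionless, since the plain lists carry no units)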
Code example #22
File: loader.py Project: Hoptune/caesar
class LazyDataset:
    """A lazily-loaded HDF5 dataset"""
    def __init__(self, obj, dataset_path):
        self._obj = obj
        self._dataset_path = dataset_path
        self._data = None

    def __getitem__(self, index):
        if self._data is None:
            with h5py.File(self._obj.data_file, 'r') as hd:
                dataset = hd[self._dataset_path]
                if 'unit' in dataset.attrs:
                    self._data = YTArray(dataset[:],
                                         dataset.attrs['unit'],
                                         registry=self._obj.unit_registry)
                else:
                    self._data = dataset[:]
        return self._data.__getitem__(index)
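A hedged usage sketch; obj stands in for a caesar object exposing data_file and unit_registry (per the constructor above), and the dataset path is hypothetical:

masses = LazyDataset(obj, "halo_data/masses")
first = masses[0]    # opens the file and reads the dataset on first access
rest = masses[1:]    # later indexing reuses the cached array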
Code example #23
    def _unpack(self, obj, hd):
        import six
        if 'simulation_attributes' not in hd.keys():
            return
        from yt.units.yt_array import YTArray
        
        hdd = hd['simulation_attributes']
        for k,v in six.iteritems(hdd.attrs):
            setattr(self, k, v)

        uhdd = hdd['units']
        for k,v in six.iteritems(uhdd.attrs):
            setattr(self, k, YTArray(getattr(self, k), v, registry=obj.unit_registry))

        phdd = hdd['parameters']
        self.parameters = {}
        for k,v in six.iteritems(phdd.attrs):
            self.parameters[k] = v
Code example #24
def get_sph_r_component(vectors, theta, phi, normal):
    # The r component of a vector is the vector dotted with rhat
    normal = normalize_vector(normal)
    (xprime, yprime, zprime) = get_ortho_basis(normal)

    res_xprime = resize_vector(xprime, vectors)
    res_yprime = resize_vector(yprime, vectors)
    res_zprime = resize_vector(zprime, vectors)

    tile_shape = [1] + list(vectors.shape)[1:]

    Jx, Jy, Jz = (YTArray(np.tile(rprime, tile_shape), "")
                  for rprime in (res_xprime, res_yprime, res_zprime))

    rhat = Jx*np.sin(theta)*np.cos(phi) + \
           Jy*np.sin(theta)*np.sin(phi) + \
           Jz*np.cos(theta)

    return np.sum(vectors * rhat, axis=0)
Code example #25
 def build_dist(self):
     """
     Build the data for plotting.
     """
     # Pick out the stars.
     if self.filter_provided:
         ct = self._filter['creation_time']
         mass_stars = self._data_source[self._filter, "particle_mass"]
     else:
         if self.ds_provided:
             ct = self._data_source['creation_time']
             if ct is None:
                  errmsg = 'data source must have creation_time!'
                 mylog.error(errmsg)
                 raise RuntimeError(errmsg)
             mask = ct > 0
             if not any(mask):
                 errmsg = 'all particles have age < 0'
                 mylog.error(errmsg)
                 raise RuntimeError(errmsg)
             # type = self._data_source['particle_type']
             ct_stars = ct[mask]
             mass_stars = self._data_source['particle_mass'][mask].in_units(
                 'Msun')
             del mask
         else:
             ct_stars = self.star_creation_time
             mass_stars = self.star_mass
     # Find the oldest stars in units of code time.
     tmin = ct_stars.min().in_units("s")
      # Pad the lower edge slightly to prevent numerical issues.
     self.time_bins = np.linspace(tmin * 1.01,
                                  self._ds.current_time.in_units("s"),
                                  num=self.bin_count + 1)
     # Figure out which bins the stars go into.
     inds = np.digitize(ct_stars.in_units("s"), self.time_bins) - 1
     # Sum up the stars created in each time bin.
     self.mass_bins = YTArray(np.zeros(self.bin_count + 1, dtype='float64'),
                              "Msun")
     for index in np.unique(inds):
         self.mass_bins[index] += (mass_stars[inds == index]).sum()
     # We will want the time taken between bins.
     self.time_bins_dt = self.time_bins[1:] - self.time_bins[:-1]
Code example #26
 def __init__(self,
              model_name,
              nH,
              emin=0.01,
              emax=50.0,
              nchan=100000,
              settings=None):
     mylog.warning("XSpecAbsorbModel is deprecated and will be removed "
                   "in a future release. Use of the other models is "
                   "suggested.")
     self.model_name = model_name
     self.nH = YTQuantity(nH * 1.0e22, "cm**-2")
     if settings is None: settings = {}
     self.settings = settings
     self.emin = emin
     self.emax = emax
     self.nchan = nchan
     ebins = np.linspace(emin, emax, nchan + 1)
     self.emid = YTArray(0.5 * (ebins[1:] + ebins[:-1]), "keV")
Code example #27
class LazyDataset:
    """A lazily-loaded HDF5 dataset"""
    def __init__(self, obj, dataset_path):
        self._obj = obj
        self._dataset_path = dataset_path
        self._data = None

    def __getitem__(self, index):
        if self._data is None:
            with h5py.File(self._obj.data_file, 'r') as hd:
                if self._dataset_path[:9] == "tree_data":
                    if isinstance(hd[self._dataset_path],
                                  h5py.Dataset):  # old prgen tree
                        self._data = hd[self._dataset_path][:]
                    else:  # new one
                        if 'galaxy' in self._dataset_path.split('_'):
                            self._data = [
                                hd[self._dataset_path + '/%d' % i][:]
                                for i in range(self._obj.ngalaxies)
                            ]
                        elif 'halo' in self._dataset_path.split('_'):
                            self._data = [
                                hd[self._dataset_path + '/%d' % i][:]
                                for i in range(self._obj.nhalos)
                            ]
                        elif 'cloud' in self._dataset_path.split('_'):
                            self._data = [
                                hd[self._dataset_path + '/%d' % i][:]
                                for i in range(self._obj.ncloud)
                            ]
                        else:
                            raise ValueError('The dataset path is not correct!',
                                             self._dataset_path)
                else:
                    dataset = hd[self._dataset_path]
                    if 'unit' in dataset.attrs:
                        self._data = YTArray(dataset[:],
                                             dataset.attrs['unit'],
                                             registry=self._obj.unit_registry)
                    else:
                        self._data = dataset[:]
        return self._data.__getitem__(index)
Code example #28
File: event_list.py Project: ilaudy/pyxsim
    def _add_events(self, ebins, spectrum, prng, absorb_model):
        exp_time = self.parameters["ExposureTime"]
        area = self.parameters["Area"]
        flux = spectrum.sum()
        num_photons = prng.poisson(lam=exp_time*area*flux)
        cumspec = np.cumsum(spectrum)
        cumspec = np.insert(cumspec, 0, 0.0)
        cumspec /= cumspec[-1]
        randvec = prng.uniform(size=num_photons)
        randvec.sort()
        e = YTArray(np.interp(randvec, cumspec, ebins), "keV")

        if absorb_model is None:
            detected = np.ones(e.shape, dtype='bool')
        else:
            detected = absorb_model.absorb_photons(e, prng=prng)

        mylog.info("Adding %d new events." % detected.sum())

        return e[detected]
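The energy draw above is inverse-CDF sampling of the binned spectrum; a standalone numpy sketch of the same idea with illustrative values:

import numpy as np

rng = np.random.RandomState(42)
spectrum = np.array([0.5, 0.3, 0.2])     # per-bin photon flux
ebins = np.array([1.0, 2.0, 3.0, 4.0])   # keV bin edges
cumspec = np.insert(np.cumsum(spectrum), 0, 0.0)
cumspec /= cumspec[-1]                   # normalized CDF at the bin edges
energies = np.interp(rng.uniform(size=5), cumspec, ebins)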
Code example #29
    def _create_lambda_field(self, lambda_min, lambda_max, n_lambda,
                             units=None):
        """
        Create a lambda array with units.
        """

        if units is None:
            units = _bin_space_units[self.bin_space]

        if isinstance(lambda_min, YTQuantity):
            my_min = lambda_min.d
        else:
            my_min = lambda_min

        if isinstance(lambda_max, YTQuantity):
            my_max = lambda_max.d
        else:
            my_max = lambda_max

        return YTArray(np.linspace(my_min, my_max, n_lambda), units)
Code example #30
    def test_halo_catalog(self):
        rs = np.random.RandomState(3670474)
        n_halos = 100
        fields = ['particle_%s' % name for name in
                  ['mass'] + ['position_%s' % ax for ax in 'xyz']]
        units = ['g'] + ['cm']*3
        data = dict((field, YTArray(rs.random_sample(n_halos), unit))
                    for field, unit in zip(fields, units))

        fn = fake_halo_catalog(data)
        ds = yt_load(fn)

        assert isinstance(ds, HaloCatalogDataset)

        for field in fields:
            f1 = data[field].in_base()
            f1.sort()
            f2 = ds.r[field].in_base()
            f2.sort()
            assert_array_equal(f1, f2)
Code example #31
File: spectral_models.py Project: NegriAndrea/pyxsim
 def get_spectrum(self, kT):
     """
     Get the thermal emission spectrum given a temperature *kT* in keV. 
     """
     tindex = np.searchsorted(self.Tvals, kT) - 1
     if tindex >= self.Tvals.shape[0] - 1 or tindex < 0:
          # Out-of-range kT: return zero spectra, plus None for var_spec
          # to match the three-element return below.
          return (YTArray(np.zeros(self.nchan), "cm**3/s"),) * 2 + (None,)
     dT = (kT - self.Tvals[tindex]) / self.dTvals[tindex]
     cspec_l = self.cosmic_spec[tindex, :]
     mspec_l = self.metal_spec[tindex, :]
     cspec_r = self.cosmic_spec[tindex + 1, :]
     mspec_r = self.metal_spec[tindex + 1, :]
     cosmic_spec = cspec_l * (1. - dT) + cspec_r * dT
     metal_spec = mspec_l * (1. - dT) + mspec_r * dT
     var_spec = None
     if self.var_spec is not None:
         vspec_l = self.var_spec[:, tindex, :]
         vspec_r = self.var_spec[:, tindex + 1, :]
         var_spec = vspec_l * (1. - dT) + vspec_r * dT
     return cosmic_spec, metal_spec, var_spec
Code example #32
File: test_fields.py Project: Xarthisius/yt-drone
def get_params(ds):
    return dict(
        axis=0,
        center=YTArray((0.0, 0.0, 0.0), "cm", registry=ds.unit_registry),
        bulk_velocity=YTArray((0.0, 0.0, 0.0),
                              "cm/s",
                              registry=ds.unit_registry),
        normal=YTArray((0.0, 0.0, 1.0), "", registry=ds.unit_registry),
        cp_x_vec=YTArray((1.0, 0.0, 0.0), "", registry=ds.unit_registry),
        cp_y_vec=YTArray((0.0, 1.0, 0.0), "", registry=ds.unit_registry),
        cp_z_vec=YTArray((0.0, 0.0, 1.0), "", registry=ds.unit_registry),
        omega_baryon=0.04,
        observer_redshift=0.0,
        source_redshift=3.0,
    )
Code example #33
def test_old_nonspatial_data():
    ds = data_dir_load(enzotiny)
    region = ds.box([0.25] * 3, [0.75] * 3)
    sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
    my_data = {}
    my_data["region_density"] = region["density"]
    my_data["sphere_density"] = sphere["density"]
    fn = "test_data.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    array_ds = data_dir_load(full_fn)
    compare_unit_attributes(ds, array_ds)
    assert isinstance(array_ds, YTNonspatialDataset)
    yield YTDataFieldTest(full_fn, "region_density", geometric=False)
    yield YTDataFieldTest(full_fn, "sphere_density", geometric=False)

    my_data = {"density": YTArray(np.linspace(1., 20., 10), "g/cm**3")}
    fn = "random_data.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    new_ds = data_dir_load(full_fn)
    assert isinstance(new_ds, YTNonspatialDataset)
    yield YTDataFieldTest(full_fn, "density", geometric=False)
Code example #34
def _hdf5_yt_attr(fh, attr, unit_registry=None):
    """
    Read an hdf5 attribute.  If there exists another attribute
    named <attr>_units, use that to assign units and return
    as either a YTArray or YTQuantity.
    """
    val = fh.attrs[attr]
    units = ""
    ufield = "%s_units" % attr
    if ufield in fh.attrs:
        units = fh.attrs[ufield]
        if isinstance(units, bytes):
            units = units.decode("utf")
    if units == "dimensionless":
        units = ""
    if units != "":
        if isinstance(val, np.ndarray):
            val = YTArray(val, units, registry=unit_registry)
        else:
            val = YTQuantity(val, units, registry=unit_registry)
    return val
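A hedged round-trip sketch with h5py; the file and attribute names are hypothetical:

import h5py

with h5py.File("attrs.h5", "w") as f:
    f.attrs["radius"] = 3.0
    f.attrs["radius_units"] = "kpc"
with h5py.File("attrs.h5", "r") as f:
    r = _hdf5_yt_attr(f, "radius")  # -> YTQuantity(3.0, "kpc")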
Code example #35
def restore_object_dicts(obj_list, hd, unit_reg):
    """Function for restoring halo/galaxy/cloud dictionary attributes.

    Parameters
    ----------
    obj_list : list
        List of objects we are restoring attributes to.
    hd : h5py.Group
        Open HDF5 dataset.
    unit_reg : yt unit registry
        Unit registry.    

    """
    if 'dicts' not in hd: return
    hdd = hd['dicts']
    for k, v in six.iteritems(hdd):
        data = np.array(v)

        unit, use_quant = get_unit_quant(v, data)

        dict_name, dict_key = k.split('.')
        if dict_key in blacklist: continue
        for i in range(0, len(obj_list)):
            if not hasattr(obj_list[i], dict_name):
                setattr(obj_list[i], dict_name, {})
            cur_dict = getattr(obj_list[i], dict_name)

            if unit is not None:
                if use_quant:
                    cur_dict[dict_key] = YTQuantity(data[i],
                                                    unit,
                                                    registry=unit_reg)
                else:
                    cur_dict[dict_key] = YTArray(data[i],
                                                 unit,
                                                 registry=unit_reg)
            else:
                cur_dict[dict_key] = data[i]
            setattr(obj_list[i], dict_name, cur_dict)
Code example #36
    def test_halo_catalog(self):
        rs = np.random.RandomState(3670474)
        n_halos = 100
        fields = ["particle_mass"
                  ] + [f"particle_position_{ax}" for ax in "xyz"]
        units = ["g"] + ["cm"] * 3
        data = {
            field: YTArray(rs.random_sample(n_halos), unit)
            for field, unit in zip(fields, units)
        }

        fn = fake_halo_catalog(data)
        ds = yt_load(fn)

        assert type(ds) is YTHaloCatalogDataset

        for field in fields:
            f1 = data[field].in_base()
            f1.sort()
            f2 = ds.r[("all", field)].in_base()
            f2.sort()
            assert_array_equal(f1, f2)
Code example #37
def test_astropy():
    from yt.utilities.on_demand_imports import _astropy

    ap_arr = np.arange(10)*_astropy.units.km/_astropy.units.hr
    yt_arr = YTArray(np.arange(10), "km/hr")
    yt_arr2 = YTArray.from_astropy(ap_arr)

    ap_quan = 10.*_astropy.units.Msun**0.5/(_astropy.units.kpc**3)
    yt_quan = YTQuantity(10., "sqrt(Msun)/kpc**3")
    yt_quan2 = YTQuantity.from_astropy(ap_quan)

    yield assert_array_equal, ap_arr, yt_arr.to_astropy()
    yield assert_array_equal, yt_arr, YTArray.from_astropy(ap_arr)
    yield assert_array_equal, yt_arr, yt_arr2

    yield assert_equal, ap_quan, yt_quan.to_astropy()
    yield assert_equal, yt_quan, YTQuantity.from_astropy(ap_quan)
    yield assert_equal, yt_quan, yt_quan2

    yield assert_array_equal, yt_arr, YTArray.from_astropy(yt_arr.to_astropy())
    yield assert_equal, yt_quan, YTQuantity.from_astropy(yt_quan.to_astropy())
Code example #38
File: xray_binaries.py Project: jzuhone/pyxsim
def make_xrb_particles(data_source, age_field, scale_length, 
                       sfr_time_range=(1.0, "Gyr"), prng=None):
    r"""
    This routine generates an in-memory dataset composed of X-ray binary particles
    from an input data source containing star particles. 

    Parameters
    ----------
    data_source : :class:`~yt.data_objects.data_containers.YTSelectionContainer`
        The yt data source to obtain the data from, such as a sphere, box, disk, 
        etc.
    age_field : string or (type, name) field tuple
        The stellar age field. Must be in some kind of time units. 
    scale_length : string, (ftype, fname) tuple, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The radial length scale over which to scatter the XRB particles
        from their parent star particle. Can be the name of a smoothing
        length field for the stars, a (value, unit) tuple, or a YTQuantity.
    sfr_time_range : string, (ftype, fname) tuple, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`, optional
        The recent time range over which to calculate the star formation rate from
        the current time in the dataset. Default: 1.0 Gyr
    prng : integer or :class:`~numpy.random.RandomState` object 
        A pseudo-random number generator. Typically will only be specified
        if you have a reason to generate the same set of random numbers, such as for a
        test. Default is to use the :mod:`numpy.random` module.
    """
    prng = parse_prng(prng)

    ds = data_source.ds

    ptype = data_source._determine_fields(age_field)[0][0]

    t = data_source[age_field].to("Gyr")
    m = data_source[(ptype, "particle_mass")].to("Msun")

    sfr_time_range = parse_value(sfr_time_range, "Gyr")

    recent = t < sfr_time_range

    n_recent = recent.sum()

    if n_recent == 0:
        sfr = 0.0
    else:
        sfr = (m[recent].sum()/sfr_time_range).to("Msun/yr").v

    mylog.info("%d star particles were formed in the last " % n_recent +
               "%s for a SFR of %4.1f Msun/yr." % (sfr_time_range, sfr))

    mtot = m.sum()

    npart = m.size

    scale_field = None
    if isinstance(scale_length, tuple):
        if isinstance(scale_length[0], string_types):
            scale_field = scale_length
    elif isinstance(scale_length, string_types):
        scale_field = (ptype, scale_length)

    if scale_field is None:
        if isinstance(scale_length, tuple):
            scale = YTArray([scale_length[0]]*npart, scale_length[1])
        elif isinstance(scale_length, YTQuantity):
            scale = YTArray([scale_length]*npart)
        else:
            # scale_length is a bare number here; indexing it would fail.
            scale = YTArray([scale_length]*npart, "kpc")
    else:
        scale = data_source[scale_length]

    scale = scale.to('kpc').d

    N_l = lmxb_cdf(Lcut)*mtot.v*1.0e-11
    N_h = hmxb_cdf(Lcut)*sfr

    N_all = N_l+N_h

    if N_all == 0.0:
        raise RuntimeError("There are no X-ray binaries to generate!")

    # Compute conversion factors from luminosity to count rate

    lmxb_factor = get_scale_factor(alpha_lmxb, emin_lmxb, emax_lmxb)
    hmxb_factor = get_scale_factor(alpha_hmxb, emin_hmxb, emax_hmxb)

    xp = []
    yp = []
    zp = []
    vxp = []
    vyp = []
    vzp = []
    lp = []
    rp = []
    ap = []

    if N_l > 0.0:

        F_l = np.zeros(nbins+1)
        for i in range(1, nbins+1):
            F_l[i] = lmxb_cdf(Lbins[i]) 
        F_l /= F_l[-1]
        invcdf_l = InterpolatedUnivariateSpline(F_l, logLbins)

        n_l = prng.poisson(lam=N_l*m/mtot)

        mylog.info("Number of low-mass X-ray binaries: %s" % n_l.sum())

        for i, n in enumerate(n_l):
            if n > 0:
                randvec = prng.uniform(size=n)
                l = YTArray(10**invcdf_l(randvec)*1.0e38, "erg/s")
                r = YTArray(l.v*lmxb_factor, "photons/s/keV")
                # Now convert output luminosities to bolometric
                l *= bc_lmxb
                x = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                y = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                z = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                x += data_source[ptype, "particle_position_x"][i].to("kpc")
                y += data_source[ptype, "particle_position_y"][i].to("kpc")
                z += data_source[ptype, "particle_position_z"][i].to("kpc")
                vx = YTArray([data_source[ptype, "particle_velocity_x"][i]]*n).to('km/s')
                vy = YTArray([data_source[ptype, "particle_velocity_y"][i]]*n).to('km/s')
                vz = YTArray([data_source[ptype, "particle_velocity_z"][i]]*n).to('km/s')
                xp.append(x)
                yp.append(y)
                zp.append(z)
                vxp.append(vx)
                vyp.append(vy)
                vzp.append(vz)
                lp.append(l)
                rp.append(r)
                ap.append(np.array([alpha_lmxb]*n))

    if N_h > 0.0:

        F_h = np.zeros(nbins+1)
        for i in range(1, nbins+1):
            F_h[i] = hmxb_cdf(Lbins[i])
        F_h /= F_h[-1]
        invcdf_h = InterpolatedUnivariateSpline(F_h, logLbins)

        n_h = prng.poisson(lam=N_h*m/mtot)

        mylog.info("Number of high-mass X-ray binaries: %s" % n_h.sum())

        for i, n in enumerate(n_h):
            if n > 0:
                randvec = prng.uniform(size=n)
                l = YTArray(10**invcdf_h(randvec)*1.0e38, "erg/s")
                r = YTArray(l.v*hmxb_factor, "photons/s/keV")
                # Now convert output luminosities to bolometric
                l *= bc_hmxb
                x = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                y = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                z = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                x += data_source[ptype, "particle_position_x"][i].to("kpc")
                y += data_source[ptype, "particle_position_y"][i].to("kpc")
                z += data_source[ptype, "particle_position_z"][i].to("kpc")
                vx = YTArray([data_source[ptype, "particle_velocity_x"][i]]*n).to('km/s')
                vy = YTArray([data_source[ptype, "particle_velocity_y"][i]]*n).to('km/s')
                vz = YTArray([data_source[ptype, "particle_velocity_z"][i]]*n).to('km/s')
                xp.append(x)
                yp.append(y)
                zp.append(z)
                vxp.append(vx)
                vyp.append(vy)
                vzp.append(vz)
                lp.append(l)
                rp.append(r)
                ap.append(np.array([alpha_hmxb]*n))

    xp = uconcatenate(xp)
    yp = uconcatenate(yp)
    zp = uconcatenate(zp)
    vxp = uconcatenate(vxp)
    vyp = uconcatenate(vyp)
    vzp = uconcatenate(vzp)
    lp = uconcatenate(lp)
    rp = uconcatenate(rp)
    ap = uconcatenate(ap)

    data = {"particle_position_x": (xp.d, str(xp.units)),
            "particle_position_y": (yp.d, str(yp.units)),
            "particle_position_z": (zp.d, str(zp.units)),
            "particle_velocity_x": (vxp.d, str(vxp.units)),
            "particle_velocity_y": (vyp.d, str(vyp.units)),
            "particle_velocity_z": (vzp.d, str(vzp.units)),
            "particle_luminosity": (lp.d, str(lp.units)),
            "particle_count_rate": (rp.d, str(rp.units)),
            "particle_spectral_index": ap}

    dle = ds.domain_left_edge.to("kpc").v
    dre = ds.domain_right_edge.to("kpc").v

    bbox = np.array([[dle[i], dre[i]] for i in range(3)])

    new_ds = load_particles(data, bbox=bbox, length_unit="kpc",
                            time_unit="Myr", mass_unit="Msun", 
                            velocity_unit="km/s")

    return new_ds
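A hedged usage sketch for make_xrb_particles; the dataset path, particle type, and age field below are illustrative assumptions:

import yt

ds = yt.load("galaxy0030/galaxy0030")       # hypothetical dataset
sp = ds.sphere("c", (25.0, "kpc"))
xrb_ds = make_xrb_particles(sp, ("PartType4", "age"), (0.5, "kpc"),
                            sfr_time_range=(0.5, "Gyr"))
ad = xrb_ds.all_data()
print(ad["io", "particle_luminosity"].sum())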