def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs):
    """Raise an error if two objects are not equal up to desired tolerance

    This is a wrapper for :func:`numpy.testing.assert_allclose` that also
    verifies unit consistency

    Parameters
    ----------
    actual : array-like
        Array obtained (possibly with attached units)
    desired : array-like
        Array to compare with (possibly with attached units)
    rtol : float, optional
        Relative tolerance, defaults to 1e-7
    atol : float or quantity, optional
        Absolute tolerance. If units are attached, they must be consistent
        with the units of ``actual`` and ``desired``. If no units are
        attached, assumes the same units as ``desired``. Defaults to zero.

    Notes
    -----
    Also accepts additional keyword arguments accepted by
    :func:`numpy.testing.assert_allclose`, see the documentation of that
    function for details.
    """
    # Create a copy to ensure this function does not alter input arrays
    act = YTArray(actual)
    des = YTArray(desired)

    try:
        des = des.in_units(act.units)
    except UnitOperationError as e:
        raise AssertionError(
            "Units of actual (%s) and desired (%s) do not have "
            "equivalent dimensions" % (act.units, des.units)) from e

    rt = YTArray(rtol)
    if not rt.units.is_dimensionless:
        raise AssertionError(
            f"Units of rtol ({rt.units}) are not dimensionless")

    if not isinstance(atol, YTArray):
        at = YTQuantity(atol, des.units)
    else:
        # Bug fix: previously a YTArray ``atol`` left ``at`` unbound,
        # raising a NameError below instead of validating its units.
        at = atol

    try:
        at = at.in_units(act.units)
    except UnitOperationError as e:
        raise AssertionError(
            "Units of atol (%s) and actual (%s) do not have "
            "equivalent dimensions" % (at.units, act.units)) from e

    # units have been validated, so we strip units before calling numpy
    # to avoid spurious errors
    act = act.value
    des = des.value
    rt = rt.value
    at = at.value

    return assert_allclose(act, des, rt, at, **kwargs)
def get_interpolator(self, data_type, e_min, e_max, energy=True):
    """Return a field interpolator for emissivity integrated over a band.

    Parameters
    ----------
    data_type : string
        Suffix selecting the table; reads ``self.emissivity_<data_type>``.
    e_min, e_max : float
        Band limits in keV (observer frame; blueshifted below).
    energy : boolean, optional
        If False, divide by the bin-center energies so the interpolated
        quantity is a photon rather than energy emissivity. Default: True.
    """
    data = getattr(self, "emissivity_%s" % data_type)
    if not energy:
        # Convert energy emissivity to photon emissivity using bin-center
        # energies (assumes self.emid aligns with the last axis of
        # ``data`` -- TODO confirm).
        data = data[..., :] / self.emid.v
    # Blueshift the requested observer-frame band into the emitted frame.
    e_min = YTQuantity(e_min, "keV")*(1.0+self.redshift)
    e_max = YTQuantity(e_max, "keV")*(1.0+self.redshift)
    # Allow a small (0.1% relative) overshoot beyond the tabulated range.
    if (e_min - self.ebin[0]) / e_min < -1e-3 or \
        (e_max - self.ebin[-1]) / e_max > 1e-3:
        raise EnergyBoundsException(self.ebin[0], self.ebin[-1])
    # Indices of the energy bins bracketing the requested band.
    e_is, e_ie = np.digitize([e_min, e_max], self.ebin)
    e_is = np.clip(e_is - 1, 0, self.ebin.size - 1)
    e_ie = np.clip(e_ie, 0, self.ebin.size - 1)

    my_dE = self.dE[e_is: e_ie].copy()
    # clip edge bins if the requested range is smaller
    my_dE[0] -= e_min - self.ebin[e_is]
    my_dE[-1] -= self.ebin[e_ie] - e_max

    # Integrate emissivity over the (clipped) bins inside the band.
    interp_data = (data[..., e_is:e_ie]*my_dE).sum(axis=-1)
    if data.ndim == 2:
        # Temperature-only table.
        emiss = UnilinearFieldInterpolator(np.log10(interp_data),
                                           [self.log_T[0], self.log_T[-1]],
                                           "log_T", truncate=True)
    else:
        # Density-and-temperature table.
        emiss = BilinearFieldInterpolator(np.log10(interp_data),
                                          [self.log_nH[0], self.log_nH[-1],
                                           self.log_T[0], self.log_T[-1]],
                                          ["log_nH", "log_T"], truncate=True)

    return emiss
def __init__(self, emin, emax, nchan):
    """Set up ``nchan`` equal-width energy channels spanning [emin, emax] keV."""
    self.emin = YTQuantity(emin, "keV")
    self.emax = YTQuantity(emax, "keV")
    self.nchan = nchan
    # nchan + 1 edges define nchan channels.
    edges = np.linspace(self.emin, self.emax, nchan + 1)
    self.ebins = edges
    self.de = np.diff(edges)
    # Channel centers are the midpoints of adjacent edges.
    self.emid = 0.5 * (edges[1:] + edges[:-1])
def add_line(self, label, field_name, wavelength, f_value, gamma, atomic_mass,
             label_threshold=None):
    r"""Add an absorption line to the list of lines included in the spectrum.

    Parameters
    ----------
    label : string
        label for the line.
    field_name : string
        field name from ray data for column densities.
    wavelength : float
        line rest wavelength in angstroms.
    f_value : float
        line f-value.
    gamma : float
        line gamma value.
    atomic_mass : float
        mass of atom in amu.
    """
    # Attach units to the physical quantities before storing the entry.
    entry = {'label': label,
             'field_name': field_name,
             'wavelength': YTQuantity(wavelength, "angstrom"),
             'f_value': f_value,
             'gamma': gamma,
             'atomic_mass': YTQuantity(atomic_mass, "amu"),
             'label_threshold': label_threshold}
    self.line_list.append(entry)
def test_subclass():
    """Check that YTArray subclasses survive arithmetic and indexing."""

    class YTASubclass(YTArray):
        pass

    a = YTASubclass([4, 5, 6], 'g')
    b = YTASubclass([7, 8, 9], 'kg')
    nu = YTASubclass([10, 11, 12], '')
    nda = np.array([3, 4, 5])
    yta = YTArray([6, 7, 8], 'mg')
    loq = [YTQuantity(6, 'mg'), YTQuantity(7, 'mg'), YTQuantity(8, 'mg')]
    ytq = YTQuantity(4, 'cm')
    ndf = np.float64(3)

    def op_comparison(op, inst1, inst2, compare_class):
        assert_isinstance(op(inst1, inst2), compare_class)
        assert_isinstance(op(inst2, inst1), compare_class)

    # Bug fix: operator.div exists only on Python 2; referencing it
    # unconditionally raised AttributeError on Python 3. Guard with
    # hasattr, matching the approach in test_registry_association.
    mul_div_ops = [operator.mul, operator.truediv]
    if hasattr(operator, "div"):
        mul_div_ops.append(operator.div)

    for op in mul_div_ops:
        for inst in (b, ytq, ndf, yta, nda, loq):
            yield op_comparison, op, a, inst, YTASubclass
        yield op_comparison, op, ytq, nda, YTArray
        yield op_comparison, op, ytq, yta, YTArray

    for op in (operator.add, operator.sub):
        yield op_comparison, op, nu, nda, YTASubclass
        yield op_comparison, op, a, b, YTASubclass
        yield op_comparison, op, a, yta, YTASubclass
        yield op_comparison, op, a, loq, YTASubclass

    # Indexing: scalar element is a quantity, slices keep the subclass.
    yield assert_isinstance, a[0], YTQuantity
    yield assert_isinstance, a[:], YTASubclass
    yield assert_isinstance, a[:2], YTASubclass
def test_reductions():
    """Reductions on a YTArray carry units and return the right types."""
    arr = YTArray([[1, 2, 3], [4, 5, 6]], 'cm')

    dot_res = arr.dot(YTArray([1, 2, 3], 'cm'))
    expected_dot = YTArray([14., 32.], 'cm**2')
    assert_equal(dot_res, expected_dot)
    assert_equal(dot_res.units, expected_dot.units)
    assert_isinstance(dot_res, YTArray)

    # For each reduction: (full reduction, axis=0 result, axis=1 result).
    answers = {
        'prod': (YTQuantity(720, 'cm**6'), YTArray([4, 10, 18], 'cm**2'),
                 YTArray([6, 120], 'cm**3')),
        'sum': (YTQuantity(21, 'cm'), YTArray([5., 7., 9.], 'cm'),
                YTArray([6, 15], 'cm'),),
        'mean': (YTQuantity(3.5, 'cm'), YTArray([2.5, 3.5, 4.5], 'cm'),
                 YTArray([2, 5], 'cm')),
        'std': (YTQuantity(1.707825127659933, 'cm'),
                YTArray([1.5, 1.5, 1.5], 'cm'),
                YTArray([0.81649658, 0.81649658], 'cm')),
    }
    for op, (full, along0, along1) in answers.items():
        scalar = getattr(arr, op)()
        assert_almost_equal(scalar, full)
        assert_almost_equal(scalar.units, full.units)
        assert_isinstance(scalar, YTQuantity)
        # axis=-1 must behave like axis=1 on a 2-D array.
        for axis, expected in ((0, along0), (1, along1), (-1, along1)):
            reduced = getattr(arr, op)(axis=axis)
            assert_almost_equal(reduced, expected)
            assert_almost_equal(reduced.units, expected.units)
            assert_isinstance(reduced, YTArray)
def get_interpolator(self, data, e_min, e_max):
    """Return an interpolator for ``data`` integrated over [e_min, e_max] keV.

    Bin widths (``self.dnu``) are stored in frequency; the clip of partial
    edge bins converts the energy overshoot to frequency via E = h*nu.
    """
    e_min = YTQuantity(e_min, "keV")
    e_max = YTQuantity(e_max, "keV")
    # Allow a small (0.1% relative) overshoot beyond the tabulated range.
    if (e_min - self.E_bins[0]) / e_min < -1e-3 or \
        (e_max - self.E_bins[-1]) / e_max > 1e-3:
        raise EnergyBoundsException(self.E_bins[0], self.E_bins[-1])
    # Indices of the energy bins bracketing the requested band.
    e_is, e_ie = np.digitize([e_min, e_max], self.E_bins)
    e_is = np.clip(e_is - 1, 0, self.E_bins.size - 1)
    e_ie = np.clip(e_ie, 0, self.E_bins.size - 1)

    my_dnu = self.dnu[e_is:e_ie].copy()
    # clip edge bins if the requested range is smaller
    my_dnu[0] -= ((e_min - self.E_bins[e_is]) / hcgs).in_units("Hz")
    my_dnu[-1] -= ((self.E_bins[e_ie] - e_max) / hcgs).in_units("Hz")

    # Integrate emissivity over the (clipped) bins inside the band.
    interp_data = (data[..., e_is:e_ie] * my_dnu).sum(axis=-1)
    if len(data.shape) == 2:
        # Temperature-only table.
        emiss = UnilinearFieldInterpolator(np.log10(interp_data),
                                           [self.log_T[0], self.log_T[-1]],
                                           "log_T", truncate=True)
    else:
        # Density-and-temperature table.
        emiss = BilinearFieldInterpolator(np.log10(interp_data), [
            self.log_nH[0], self.log_nH[-1], self.log_T[0], self.log_T[-1]
        ], ["log_nH", "log_T"], truncate=True)

    return emiss
def _BGx1(field, data):
    """x-component of a superposed stellar + planetary dipole background field.

    A star sits at the origin (radius ``Rs``, surface field ``B0s``) and a
    planet at x = ``a`` (radius ``Rp``, surface field ``B0p``); the field is
    returned in gauss on the grid.
    """
    B0s = YTQuantity(2.0, "G")                  # stellar surface field
    B0p = YTQuantity(1.0, "G")                  # planetary surface field
    Rs = YTQuantity(6.955e+10, "cm")            # stellar radius
    Rp = YTQuantity(1.5 * 0.10045 * Rs, "cm")   # planetary radius
    a = YTQuantity(0.047, "au").in_units("cm")  # orbital separation
    x1 = data["x"].in_units('cm')
    x2 = data["y"].in_units('cm')
    x3 = data["z"].in_units('cm')
    # Distances from the star (origin) and the planet (x = a).
    rs = np.sqrt(x1 * x1 + x2 * x2 + x3 * x3)
    rp = np.sqrt((x1 - a) * (x1 - a) + x2 * x2 + x3 * x3)
    # Cleanup: removed an unused ``center`` field parameter lookup and a
    # zero-filled array that was immediately overwritten (dead store).
    # Dipole x-component for each body: Bx = 3*x*z*B0*R**3 / r**5.
    BGx1 = 3.0 * x1 * x3 * B0s * Rs**3 * rs**(-5) + 3.0 * (
        x1 - a) * x3 * B0p * Rp**3 * rp**(-5)
    # Inside the star, keep only the stellar dipole contribution.
    BGx1[rs <= Rs] = 3.0 * x1[rs <= Rs] * x3[rs <= Rs] * B0s * Rs**3 * rs[
        rs <= Rs]**(-5)
    # Zero out the stellar core to avoid the r -> 0 singularity.
    BGx1[rs <= 0.5 * Rs] = 0.0
    # Inside the planet, keep only the planetary dipole contribution.
    BGx1[rp <= Rp] = 3.0*(x1[rp <= Rp] - a)*x3[rp <= Rp]\
        *B0p*Rp**3*rp[rp <= Rp]**(-5)
    BGx1[rp <= 0.5 * Rp] = 0.0
    return BGx1
def _BGx3(field, data):
    """z-component of a superposed stellar + planetary dipole background field.

    Companion to ``_BGx1``: star at the origin, planet at x = ``a``; the
    field is returned in gauss on the grid.
    """
    B0s = YTQuantity(2.0, "G")                  # stellar surface field
    B0p = YTQuantity(1.0, "G")                  # planetary surface field
    Rs = YTQuantity(6.955e+10, "cm")            # stellar radius
    Rp = YTQuantity(1.5 * 0.10045 * Rs, "cm")   # planetary radius
    a = YTQuantity(0.047, "au").in_units("cm")  # orbital separation
    x1 = data["x"].in_units('cm')
    x2 = data["y"].in_units('cm')
    x3 = data["z"].in_units('cm')
    # Distances from the star (origin) and the planet (x = a).
    rs = np.sqrt(x1 * x1 + x2 * x2 + x3 * x3)
    rp = np.sqrt((x1 - a) * (x1 - a) + x2 * x2 + x3 * x3)
    # Cleanup: removed an unused zero-filled array ``BG_z`` (dead code;
    # the result was always built in ``BGx3`` below).
    # Dipole z-component for each body: Bz = (3*z**2 - r**2)*B0*R**3 / r**5.
    BGx3 = (3.0*x3*x3 - rs*rs)*B0s*Rs**3*rs**(-5) \
        + (3.0*x3*x3 - rp*rp)*B0p*Rp**3*rp**(-5)
    # Inside the star, keep only the stellar dipole contribution.
    BGx3[rs <= Rs] = (3.0*x3[rs <= Rs]*x3[rs <= Rs] - \
        rs[rs <= Rs]*rs[rs <= Rs])*B0s*Rs**3*rs[rs <= Rs]**(-5)
    # Constant field in the stellar core avoids the r -> 0 singularity.
    BGx3[rs <= 0.5 * Rs] = 16.0 * B0s
    # Inside the planet, keep only the planetary dipole contribution.
    BGx3[rp <= Rp] = (3.0*x3[rp <= Rp]*x3[rp <= Rp] - \
        rp[rp <= Rp]*rp[rp <= Rp])*B0p*Rp**3*rp[rp <= Rp]**(-5)
    BGx3[rp <= 0.5 * Rp] = 16.0 * B0p
    return BGx3
def test_to_value():
    """to_value strips units, optionally converting to other units first."""
    kpc_arr = YTArray([1.0, 2.0, 3.0], "kpc")
    assert_equal(kpc_arr.to_value(), np.array([1.0, 2.0, 3.0]))
    assert_equal(kpc_arr.to_value(), kpc_arr.value)
    assert_equal(kpc_arr.to_value("km"), kpc_arr.in_units("km").value)

    mass = YTQuantity(5.5, "Msun")
    assert_equal(mass.to_value(), 5.5)
    assert_equal(mass.to_value("g"), mass.in_units("g").value)
def _solar_cooling_time(field, data):
    """Cooling time for solar-metallicity gas (cooling factor fm = 1)."""
    coeff = YTQuantity(3.88e11, 's/K**(1/2)/cm**3')
    T_break = YTQuantity(5e7, 'K')
    mean_weight = 0.6
    m_hydrogen = YTQuantity(1.6726219e-24, 'g')
    metal_factor = 1.0
    temp = data[('gas', 'temperature')].in_units('K')
    numerator = coeff * mean_weight * m_hydrogen * temp**(1. / 2.)
    denominator = data[('gas', 'density')].in_units('g/cm**3') * (
        1 + T_break * metal_factor / temp)
    return numerator / denominator
def test_pint():
    """Round-trip conversions between pint and yt arrays/quantities."""
    from pint import UnitRegistry
    ureg = UnitRegistry()

    pint_arr = np.arange(10) * ureg.km / ureg.hr
    yt_arr = YTArray(np.arange(10), "km/hr")
    round_trip_arr = YTArray.from_pint(pint_arr)

    pint_quan = 10. * ureg.g**0.5 / (ureg.mm**3)
    yt_quan = YTQuantity(10., "sqrt(g)/mm**3")
    round_trip_quan = YTQuantity.from_pint(pint_quan)

    # Array conversions in both directions.
    assert_array_equal(pint_arr, yt_arr.to_pint())
    assert_equal(pint_quan, yt_quan.to_pint())
    assert_array_equal(yt_arr, YTArray.from_pint(pint_arr))
    assert_array_equal(yt_arr, round_trip_arr)

    # Scalar conversions in both directions.
    assert_equal(pint_quan.magnitude, yt_quan.to_pint().magnitude)
    assert_equal(pint_quan, yt_quan.to_pint())
    assert_equal(yt_quan, YTQuantity.from_pint(pint_quan))
    assert_equal(yt_quan, round_trip_quan)

    # Full round trips through pint and back.
    assert_array_equal(yt_arr, YTArray.from_pint(yt_arr.to_pint()))
    assert_equal(yt_quan, YTQuantity.from_pint(yt_quan.to_pint()))
def _primordial_cooling_time(field, data):
    """Cooling time for primordial gas (cooling factor fm = 0.03).

    Taken from https://arxiv.org/pdf/astro-ph/9809159.pdf, equation 11.
    """
    coeff = YTQuantity(3.88e11, 's/K**(1/2)/cm**3')
    T_break = YTQuantity(5e7, 'K')
    mean_weight = 0.6
    m_hydrogen = YTQuantity(1.6726219e-24, 'g')
    metal_factor = 0.03
    temp = data[('gas', 'temperature')].in_units('K')
    numerator = coeff * mean_weight * m_hydrogen * temp**(1. / 2.)
    denominator = data[('gas', 'density')].in_units('g/cm**3') * (
        1 + T_break * metal_factor / temp)
    return numerator / denominator
def from_fits_file(cls, fitsfile):
    """
    Initialize an :class:`~pyxsim.event_list.EventList` from a FITS file
    with filename *fitsfile*.
    """
    hdulist = pyfits.open(fitsfile, memmap=True)

    tblhdu = hdulist["EVENTS"]

    events = {}
    parameters = {}

    # Observation-level metadata from the EVENTS table header.
    parameters["exp_time"] = YTQuantity(tblhdu.header["EXPOSURE"], "s")
    parameters["area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
    parameters["sky_center"] = YTArray(
        [tblhdu.header["TCRVL2"], tblhdu.header["TCRVL3"]], "deg")
    num_events = tblhdu.header["NAXIS2"]
    # Partition the event rows across MPI ranks.
    start_e = comm.rank * num_events // comm.size
    end_e = (comm.rank + 1) * num_events // comm.size

    # Reconstruct the tangent-plane WCS from the table header so the pixel
    # coordinates below can be converted to sky coordinates.
    wcs = pywcs.WCS(naxis=2)
    wcs.wcs.crpix = [tblhdu.header["TCRPX2"], tblhdu.header["TCRPX3"]]
    wcs.wcs.crval = parameters["sky_center"].d
    wcs.wcs.cdelt = [tblhdu.header["TCDLT2"], tblhdu.header["TCDLT3"]]
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    wcs.wcs.cunit = ["deg"] * 2

    xx = tblhdu.data["X"][start_e:end_e]
    yy = tblhdu.data["Y"][start_e:end_e]

    xx, yy = wcs.wcs_pix2world(xx, yy, 1)

    events["xsky"] = YTArray(xx, "degree")
    events["ysky"] = YTArray(yy, "degree")
    # ENERGY column is in eV here; convert to keV.
    events["eobs"] = YTArray(tblhdu.data["ENERGY"][start_e:end_e] / 1000.,
                             "keV")

    # Instrument/response metadata is optional; its presence indicates the
    # events were convolved with a response.
    if "RESPFILE" in tblhdu.header:
        parameters["rmf"] = tblhdu.header["RESPFILE"]
        parameters["arf"] = tblhdu.header["ANCRFILE"]
        parameters["channel_type"] = tblhdu.header["CHANTYPE"].lower()
        parameters["mission"] = tblhdu.header["MISSION"]
        parameters["telescope"] = tblhdu.header["TELESCOP"]
        parameters["instrument"] = tblhdu.header["INSTRUME"]
        # Also carry over the per-event channel column.
        events[parameters["channel_type"]] = tblhdu.data[
            parameters["channel_type"]][start_e:end_e]

    hdulist.close()

    # Convolved events get the specialized list type.
    if "rmf" in tblhdu.header:
        return ConvolvedEventList(events, parameters)
    else:
        return EventList(events, parameters)
def _set_units(self, ds, base_units):
    """Resolve length/mass/time/velocity/magnetic units and set them on self.

    For each unit attribute, the value comes (in priority order) from the
    caller-provided ``base_units`` entry, the dataset ``ds``, a derived
    value (velocity/magnetic), or the cgs default. Accepted forms are a
    unit string, a plain number (interpreted in cgs), a YTQuantity, or a
    (value, unit) tuple.
    """
    attrs = (
        "length_unit",
        "mass_unit",
        "time_unit",
        "velocity_unit",
        "magnetic_unit",
    )
    cgs_units = ("cm", "g", "s", "cm/s", "gauss")
    for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
        if unit is None:
            if ds is not None:
                # Prefer the dataset's own unit when available.
                u = getattr(ds, attr, None)
            elif attr == "velocity_unit":
                # Derive from previously-set length and time units.
                u = self.length_unit / self.time_unit
            elif attr == "magnetic_unit":
                # Gaussian-units relation: B ~ sqrt(4*pi*m/(t**2*l)).
                u = np.sqrt(
                    4.0
                    * np.pi
                    * self.mass_unit
                    / (self.time_unit ** 2 * self.length_unit)
                )
            else:
                u = cgs_unit
        else:
            u = unit
        # Normalize the accepted input forms into a YTQuantity.
        if isinstance(u, str):
            uq = YTQuantity(1.0, u)
        elif isinstance(u, numeric_type):
            uq = YTQuantity(u, cgs_unit)
        elif isinstance(u, YTQuantity):
            uq = u.copy()
        elif isinstance(u, tuple):
            uq = YTQuantity(u[0], u[1])
        else:
            # NOTE(review): u may be None here (e.g. ds lacks the attr);
            # the length_unit check below would then raise AttributeError
            # -- confirm whether callers can hit this path.
            uq = None
        if uq is not None and uq.units.is_code_unit:
            # Code units are dataset-relative and meaningless in a FITS
            # file, so force a cgs equivalent.
            mylog.warning(
                "Cannot use code units of '%s' " % uq.units
                + "when creating a FITSImageData instance! "
                "Converting to a cgs equivalent."
            )
            uq.convert_to_cgs()
        if attr == "length_unit" and uq.value != 1.0:
            # Normalize the length unit to a magnitude of 1.
            mylog.warning(
                "Converting length units " "from %s to %s." % (uq, uq.units)
            )
            uq = YTQuantity(1.0, uq.units)
        setattr(self, attr, uq)
def _metal_cooling_time(field, data):
    """Cooling time with a metallicity-dependent cooling factor."""
    coeff = YTQuantity(3.88e11, 's/K**(1/2)/cm**3')
    T_break = YTQuantity(5e7, 'K')
    mean_weight = 0.6
    m_hydrogen = YTQuantity(1.6726219e-24, 'g')
    temp = data[('gas', 'temperature')].in_units('K')
    Zmet = data[('gas', 'metallicity')].in_units('Zsun')
    metal_factor = 1 + 0.14 * np.log(Zmet.d)
    # NOTE(review): a floor of 0.03 on the cooling factor was considered
    # here but is intentionally disabled.
    numerator = coeff * mean_weight * m_hydrogen * temp**(1. / 2.)
    denominator = data[('gas', 'density')].in_units('g/cm**3') * (
        1 + T_break * metal_factor / temp)
    return numerator / denominator
def test_copy():
    """Every copying mechanism preserves value and units."""
    quan = YTQuantity(1, 'g')
    arr = YTArray([1, 2, 3], 'cm')

    # shallow copy, deep copy, the .copy() method, and np.copy, in order
    for duplicate in (copy.copy, copy.deepcopy,
                      lambda obj: obj.copy(), np.copy):
        yield assert_equal, duplicate(quan), quan
        yield assert_array_equal, duplicate(arr), arr
def test_copy():
    """Every copying mechanism preserves value and units."""
    quan = YTQuantity(1, 'g')
    arr = YTArray([1, 2, 3], 'cm')

    # shallow copy, deep copy, the .copy() method, and np.copy, in order
    for duplicate in (copy.copy, copy.deepcopy,
                      lambda obj: obj.copy(), np.copy):
        assert_equal(duplicate(quan), quan)
        assert_array_equal(duplicate(arr), arr)
def _sanitize_min_max_units(amin, amax, finfo, registry):
    # returns a copy of amin and amax, converted to finfo's output units
    def _to_output_units(bound):
        # Values without units are wrapped in the field's output units;
        # values with units are converted to them.
        bound_units = getattr(bound, 'units', None)
        if bound_units is None:
            bound_units = Unit(finfo.output_units, registry=registry)
            return YTQuantity(bound, bound_units)
        return bound.in_units(finfo.output_units)

    return _to_output_units(amin), _to_output_units(amax)
def test_copy():
    """Copies of quantities and arrays compare equal to the originals."""
    quan = YTQuantity(1, 'g')
    arr = YTArray([1, 2, 3], 'cm')

    checks = [
        (copy.copy(quan), copy.copy(arr)),          # shallow copies
        (copy.deepcopy(quan), copy.deepcopy(arr)),  # deep copies
        (quan.copy(), arr.copy()),                  # .copy() method
        (np.copy(quan), np.copy(arr)),              # numpy-level copy
    ]
    for quan_copy, arr_copy in checks:
        yield assert_equal, quan_copy, quan
        yield assert_array_equal, arr_copy, arr
def fake_halo_catalog(data):
    """Write a minimal halo-catalog dataset to disk and return its filename.

    Parameters
    ----------
    data : dict
        Mapping of field name to particle data; must contain
        'particle_mass'.
    """
    filename = "catalog.0.h5"
    # Idiom/consistency fix: dict comprehension instead of
    # dict((field, '.') for ...), matching the sibling implementation.
    ftypes = {field: '.' for field in data}
    extra_attrs = {
        "data_type": "halo_catalog",
        "num_halos": data['particle_mass'].size
    }
    # Minimal fake dataset attributes required by save_as_dataset.
    ds = {
        'cosmological_simulation': 1,
        'omega_lambda': 0.7,
        'omega_matter': 0.3,
        'hubble_constant': 0.7,
        'current_redshift': 0,
        'current_time': YTQuantity(1, 'yr'),
        'domain_left_edge': YTArray(np.zeros(3), 'cm'),
        'domain_right_edge': YTArray(np.ones(3), 'cm')
    }
    save_as_dataset(ds, filename, data, field_types=ftypes,
                    extra_attrs=extra_attrs)
    return filename
def restore_object_attributes(obj_list, hd, unit_reg):
    """Function for restoring halo/galaxy/cloud attributes.

    Parameters
    ----------
    obj_list : list
        List of objects we are restoring attributes to.
    hd : h5py.Group
        Open HDF5 dataset.
    unit_reg : yt unit registry
        Unit registry.
    """
    for k, v in six.iteritems(hd):
        if k in blacklist:
            continue
        # Nested containers are restored elsewhere.
        if k == 'lists' or k == 'dicts':
            continue
        data = np.array(v)
        unit, use_quant = get_unit_quant(v, data)
        # Idiom fix: iterate the objects directly with enumerate rather
        # than indexing via range(len(obj_list)).
        for i, obj in enumerate(obj_list):
            if unit is None:
                # No unit information: store the raw value.
                setattr(obj, k, data[i])
            elif use_quant:
                setattr(obj, k,
                        YTQuantity(data[i], unit, registry=unit_reg))
            else:
                setattr(obj, k,
                        YTArray(data[i], unit, registry=unit_reg))
def __call__(self, plot):
    """Overlay velocity vectors on *plot* by delegating to a quiver callback.

    Cutting planes use the in-plane cutting velocity fields; axis-aligned
    plots use the two velocity components perpendicular to the slice axis,
    optionally corrected for a bulk velocity field parameter.
    """
    # Instantiation of these is cheap
    if plot._type_name == "CuttingPlane":
        qcb = CuttingQuiverCallback("cutting_plane_velocity_x",
                                    "cutting_plane_velocity_y",
                                    self.factor)
    else:
        ax = plot.data.axis
        # Map the slice axis to the two in-plane image axes.
        (xi, yi) = (plot.data.ds.coordinates.x_axis[ax],
                    plot.data.ds.coordinates.y_axis[ax])
        axis_names = plot.data.ds.coordinates.axis_name
        xv = "velocity_%s" % (axis_names[xi])
        yv = "velocity_%s" % (axis_names[yi])

        bv = plot.data.get_field_parameter("bulk_velocity")
        if bv is not None:
            # Pass the in-plane bulk-velocity components for subtraction.
            bv_x = bv[xi]
            bv_y = bv[yi]
        else:
            bv_x = bv_y = YTQuantity(0, 'cm/s')

        qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale,
                             scale_units=self.scale_units,
                             normalize=self.normalize, bv_x=bv_x,
                             bv_y=bv_y)
    return qcb(plot)
def __call__(self, plot):
    """Draw the dataset's current time as text on *plot*.

    If no units were specified, choose the largest time unit in which the
    current time is at least 1, and cache that choice on ``self.units``.
    """
    if self.units is None:
        t = plot.data.ds.current_time.in_units('s')
        # Candidate units from smallest to largest.
        scale_keys = [
            'fs', 'ps', 'ns', 'us', 'ms', 's', 'hr', 'day', 'yr', 'kyr',
            'Myr', 'Gyr'
        ]
        for i, k in enumerate(scale_keys):
            if t < YTQuantity(1, k):
                # t would drop below 1 in this unit; keep the previous one.
                break
            t.convert_to_units(k)
        self.units = scale_keys[i - 1]
    else:
        t = plot.data.ds.current_time.in_units(self.units)
    s = self.format.format(time=float(t), units=self.units)
    # NOTE(review): Axes.hold was removed in matplotlib >= 3.0 -- confirm
    # which matplotlib versions this callback supports.
    plot._axes.hold(True)
    if self.normalized:
        # Position is interpreted in normalized axes coordinates.
        plot._axes.text(self.x, self.y, s,
                        horizontalalignment='center',
                        verticalalignment='center',
                        transform=plot._axes.transAxes,
                        bbox=self.bbox_dict)
    else:
        # Position is interpreted in data coordinates.
        plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict,
                        **self.kwargs)
    plot._axes.hold(False)
def test_registry_association():
    """Operation results keep the unit registry of their ds-created operand."""
    ds = fake_random_ds(64, nprocs=1, length_unit=10)
    a = ds.quan(3, 'cm')
    b = YTQuantity(4, 'm')
    c = ds.quan(6, '')
    d = 5

    assert_equal(id(a.units.registry), id(ds.unit_registry))

    def check_binary(op):
        # Whichever operand comes first, the ds registry must win for
        # ds-created quantities; plain quantities keep their own registry.
        res_ab = op(a, b)
        res_ba = op(b, a)
        res_cd = op(c, d)
        res_dc = op(d, c)
        assert_equal(id(res_ab.units.registry), id(ds.unit_registry))
        assert_equal(id(res_ba.units.registry), id(b.units.registry))
        assert_equal(id(res_cd.units.registry), id(res_dc.units.registry))
        assert_equal(id(res_cd.units.registry), id(ds.unit_registry))

    def check_unary(op):
        res_a = op(a)
        res_b = op(b)
        assert_equal(id(res_a.units.registry), id(ds.unit_registry))
        assert_equal(id(res_b.units.registry), id(b.units.registry))

    binary_ops = [operator.add, operator.sub, operator.mul,
                  operator.truediv]
    if hasattr(operator, "div"):
        # Python 2 only.
        binary_ops.append(operator.div)

    for op in binary_ops:
        check_binary(op)
    for op in (operator.abs, operator.neg, operator.pos):
        check_unary(op)
def test_nonspatial_data():
    """Round-trip non-spatial field data through save_as_dataset/load.

    Saves real region/sphere density arrays and a purely synthetic array,
    then checks both reload as YTNonspatialDataset.
    """
    tmpdir = make_tempdir()
    curdir = os.getcwd()
    # Work inside the temp dir so output files are cleaned up below.
    os.chdir(tmpdir)
    ds = data_dir_load(enzotiny)
    region = ds.box([0.25] * 3, [0.75] * 3)
    sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
    my_data = {}
    my_data["region_density"] = region[("gas", "density")]
    my_data["sphere_density"] = sphere[("gas", "density")]
    fn = "test_data.h5"
    save_as_dataset(ds, fn, my_data)
    full_fn = os.path.join(tmpdir, fn)
    array_ds = load(full_fn)
    compare_unit_attributes(ds, array_ds)
    assert isinstance(array_ds, YTNonspatialDataset)
    yield YTDataFieldTest(full_fn, "region_density", geometric=False)
    yield YTDataFieldTest(full_fn, "sphere_density", geometric=False)

    # Synthetic data with a dict standing in for the dataset attributes.
    my_data = {"density": YTArray(np.linspace(1.0, 20.0, 10), "g/cm**3")}
    fake_ds = {"current_time": YTQuantity(10, "Myr")}
    fn = "random_data.h5"
    save_as_dataset(fake_ds, fn, my_data)
    full_fn = os.path.join(tmpdir, fn)
    new_ds = load(full_fn)
    assert isinstance(new_ds, YTNonspatialDataset)
    yield YTDataFieldTest(full_fn, ("data", "density"), geometric=False)
    os.chdir(curdir)
    # Only remove the directory if it really was a temporary one.
    if tmpdir != ".":
        shutil.rmtree(tmpdir)
def fake_halo_catalog(data):
    """Write a minimal halo-catalog dataset to disk and return its filename."""
    filename = "catalog.0.h5"

    # Every field gets the generic "." field type.
    field_types = {}
    for field in data:
        field_types[field] = "."

    extra_attrs = {"data_type": "halo_catalog",
                   "num_halos": data["particle_mass"].size}

    # Minimal fake dataset attributes required by save_as_dataset.
    fake_ds = {"cosmological_simulation": 1,
               "omega_lambda": 0.7,
               "omega_matter": 0.3,
               "hubble_constant": 0.7,
               "current_redshift": 0,
               "current_time": YTQuantity(1, "yr"),
               "domain_left_edge": YTArray(np.zeros(3), "cm"),
               "domain_right_edge": YTArray(np.ones(3), "cm")}

    save_as_dataset(fake_ds, filename, data, field_types=field_types,
                    extra_attrs=extra_attrs)
    return filename
def test_fix_length():
    """
    Test fixing the length of an array. Used in spheres and other data
    objects
    """
    ds = fake_random_ds(64, nprocs=1, length_unit=10)
    code_length = ds.quan(1.0, 'code_length')
    fixed = fix_length(code_length, ds=ds)
    # One code length is 10 cm for this dataset.
    yield assert_equal, YTQuantity(10, 'cm'), fixed
def return_spectrum(self, temperature, metallicity, redshift, norm,
                    velocity=0.0):
    """
    Given the properties of a thermal plasma, return a spectrum.

    Parameters
    ----------
    temperature : float
        The temperature of the plasma in keV.
    metallicity : float
        The metallicity of the plasma in solar units.
    redshift : float
        The redshift of the plasma.
    norm : float
        The normalization of the model, in the standard Xspec units of
        1.0e-14*EM/(4*pi*(1+z)**2*D_A**2).
    velocity : float, optional
        Velocity broadening parameter in km/s. Default: 0.0
    """
    velocity = YTQuantity(velocity, "km/s").in_cgs().v
    scale_factor = 1.0 / (1. + redshift)

    # Index of the temperature table entry just below ``temperature``.
    tindex = np.searchsorted(self.Tvals, temperature) - 1
    if tindex >= self.Tvals.shape[0] - 1 or tindex < 0:
        # Outside the tabulated temperature range: no emission.
        return YTArray(np.zeros(self.nchan), "photons/s/cm**2")
    # Fractional distance between the bracketing table temperatures.
    dT = (temperature - self.Tvals[tindex]) / self.dTvals[tindex]

    cosmic_spec = np.zeros(self.nchan)
    metal_spec = np.zeros(self.nchan)

    # Linear interpolation weights for the two bracketing temperatures.
    fac = [1.0 - dT, dT]

    for i, ikT in enumerate([tindex, tindex + 1]):
        line_fields, coco_fields = self._preload_data(ikT)

        kT = self.Tvals[ikT]

        # First do H,He, and trace elements
        for elem in self.cosmic_elem:
            cosmic_spec += fac[i] * self._make_spectrum(
                kT, elem, line_fields, coco_fields, scale_factor,
                velocity=velocity)

        # Next do the metals
        for elem in self.metal_elem:
            metal_spec += fac[i] * self._make_spectrum(
                kT, elem, line_fields, coco_fields, scale_factor,
                velocity=velocity)

    # Metal contribution scales linearly with metallicity.
    tspec = (cosmic_spec + metallicity * metal_spec)
    return YTArray(1.0e14 * norm * tspec, "photons/s/cm**2")
def test_line_emission():
    """Statistical check of LineSourceModel photon generation.

    Generates photons from a Gaussian emission line tied to a derived
    dark-matter emission field, then verifies the sample mean, variance,
    and photon count against the analytic expectations at ~90% confidence
    (1.645 sigma).
    """
    bms = BetaModelSource()
    ds = bms.ds

    def _dm_emission(field, data):
        # Emission rate ~ cross_section * n_dm**2 * volume.
        return cross_section * (data["dark_matter_density"] /
                                m_chi)**2 * data["cell_volume"]

    ds.add_field(("gas", "dm_emission"), function=_dm_emission,
                 units="s**-1")

    location = YTQuantity(3.5, "keV")
    sigma = YTQuantity(1000., "km/s")
    # Doppler width of the line in energy units.
    sigma_E = (location * sigma / clight).in_units("keV")

    A = YTQuantity(1000., "cm**2")
    exp_time = YTQuantity(2.0e5, "s")
    redshift = 0.01

    sphere = ds.sphere("c", (100., "kpc"))

    line_model = LineSourceModel(location, "dm_emission",
                                 sigma="dark_matter_dispersion", prng=32)

    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          line_model)

    D_A = photons.parameters["fid_d_a"]
    # Flux dilution factor for a source at this angular-diameter distance.
    dist_fac = 1.0 / (4. * np.pi * D_A * D_A * (1. + redshift)**3)
    dm_E = (sphere["dm_emission"]).sum()

    E = uconcatenate(photons["energy"])
    n_E = len(E)

    # Expected (dimensionless) photon count.
    n_E_pred = (exp_time * A * dm_E * dist_fac).in_units("dimensionless")

    # Observed-frame line center and width.
    loc = location / (1. + redshift)
    sig = sigma_E / (1. + redshift)

    # 90%-confidence checks on mean, variance, and count.
    assert np.abs(loc - E.mean()) < 1.645 * sig / np.sqrt(n_E)
    assert np.abs(E.std()**2 - sig * sig) < 1.645 * np.sqrt(
        2 * (n_E - 1)) * sig**2 / n_E
    assert np.abs(n_E - n_E_pred) < 1.645 * np.sqrt(n_E)
def convolve(self, field, kernel, **kwargs):
    """
    Convolve an image with a kernel, either a simple Gaussian kernel or
    one provided by AstroPy. Currently, this only works for 2D images.

    All keyword arguments are passed to
    :meth:`~astropy.convolution.convolve`.

    Parameters
    ----------
    field : string
        The name of the field to convolve.
    kernel : float, YTQuantity, (value, unit) tuple, or AstroPy Kernel object
        The kernel to convolve the image with. If this is an AstroPy Kernel
        object, the image will be convolved with it. Otherwise, it is
        assumed that the kernel is a Gaussian and that this value is the
        standard deviation. If a float, it is assumed that the units are
        pixels, but a (value, unit) tuple or YTQuantity can be supplied to
        specify the standard deviation in physical units.

    Examples
    --------
    >>> fid = FITSSlice(ds, "z", "density")
    >>> fid.convolve("density", (3.0, "kpc"))
    """
    if self.dimensionality == 3:
        raise RuntimeError(
            "Convolution currently only works for 2D FITSImageData!")
    conv = _astropy.conv
    if field not in self.keys():
        raise KeyError("%s not an image!" % field)
    idx = self.fields.index(field)
    if not isinstance(kernel, conv.Kernel):
        if not isinstance(kernel, numeric_type):
            # Physical width: convert to pixels via the WCS pixel scale.
            unit = str(self.wcs.wcs.cunit[0])
            pix_scale = YTQuantity(self.wcs.wcs.cdelt[0], unit)
            if isinstance(kernel, tuple):
                stddev = YTQuantity(kernel[0], kernel[1]).to(unit)
            else:
                stddev = kernel.to(unit)
            kernel = stddev / pix_scale
        # A bare number is interpreted as a stddev in pixels.
        kernel = conv.Gaussian2DKernel(x_stddev=kernel)
    self.hdulist[idx].data = conv.convolve(self.hdulist[idx].data, kernel,
                                           **kwargs)
def test_electromagnetic():
    """Round-trip electromagnetic quantities between SI (MKS) and CGS."""
    from yt.units.dimensions import charge_mks, pressure, current_cgs, \
        magnetic_field_mks, magnetic_field_cgs, power
    from yt.utilities.physical_constants import mu_0, qp
    from yt.utilities.physical_ratios import speed_of_light_cm_per_s

    # Various tests of SI and CGS electromagnetic units

    # Proton charge: esu <-> Coulomb conversions.
    qp_mks = qp.to_equivalent("C", "SI")
    yield assert_equal, qp_mks.units.dimensions, charge_mks
    yield assert_array_almost_equal, qp_mks.v, \
        10.0*qp.v/speed_of_light_cm_per_s

    qp_cgs = qp_mks.to_equivalent("esu", "CGS")
    yield assert_array_almost_equal, qp_cgs, qp
    yield assert_equal, qp_cgs.units.dimensions, qp.units.dimensions

    qp_mks_k = qp.to_equivalent("kC", "SI")
    yield assert_array_almost_equal, qp_mks_k.v, \
        1.0e-2*qp.v/speed_of_light_cm_per_s

    # Magnetic field: tesla <-> gauss, and energy density consistency.
    B = YTQuantity(1.0, "T")
    B_cgs = B.to_equivalent("gauss", "CGS")
    yield assert_equal, B.units.dimensions, magnetic_field_mks
    yield assert_equal, B_cgs.units.dimensions, magnetic_field_cgs
    yield assert_array_almost_equal, B_cgs, YTQuantity(1.0e4, "gauss")

    u_mks = B*B/(2*mu_0)
    yield assert_equal, u_mks.units.dimensions, pressure
    u_cgs = B_cgs*B_cgs/(8*np.pi)
    yield assert_equal, u_cgs.units.dimensions, pressure
    yield assert_array_almost_equal, u_mks.in_cgs(), u_cgs

    # Current: ampere <-> statampere.
    I = YTQuantity(1.0, "A")
    I_cgs = I.to_equivalent("statA", "CGS")
    yield assert_array_almost_equal, I_cgs, \
        YTQuantity(0.1*speed_of_light_cm_per_s, "statA")
    yield assert_array_almost_equal, I_cgs.to_equivalent("mA", "SI"), \
        I.in_units("mA")
    yield assert_equal, I_cgs.units.dimensions, current_cgs

    # Resistance and power: P = I**2 * R in both systems.
    R = YTQuantity(1.0, "ohm")
    R_cgs = R.to_equivalent("statohm", "CGS")
    P_mks = I*I*R
    P_cgs = I_cgs*I_cgs*R_cgs
    yield assert_equal, P_mks.units.dimensions, power
    yield assert_equal, P_cgs.units.dimensions, power
    yield assert_array_almost_equal, P_cgs.in_cgs(), P_mks.in_cgs()
    yield assert_array_almost_equal, P_cgs.in_mks(), YTQuantity(1.0, "W")

    # Potential: statvolt <-> volt.
    V = YTQuantity(1.0, "statV")
    V_mks = V.to_equivalent("V", "SI")
    yield assert_array_almost_equal, V_mks.v, \
        1.0e8*V.v/speed_of_light_cm_per_s
def test_astropy():
    """Round-trip conversions between astropy and yt arrays/quantities."""
    from yt.utilities.on_demand_imports import _astropy
    units = _astropy.units

    ap_arr = np.arange(10) * units.km / units.hr
    yt_arr = YTArray(np.arange(10), "km/hr")
    round_trip_arr = YTArray.from_astropy(ap_arr)

    ap_quan = 10. * units.Msun**0.5 / (units.kpc**3)
    yt_quan = YTQuantity(10., "sqrt(Msun)/kpc**3")
    round_trip_quan = YTQuantity.from_astropy(ap_quan)

    # Array conversions in both directions.
    yield assert_array_equal, ap_arr, yt_arr.to_astropy()
    yield assert_array_equal, yt_arr, YTArray.from_astropy(ap_arr)
    yield assert_array_equal, yt_arr, round_trip_arr

    # Scalar conversions in both directions.
    yield assert_equal, ap_quan, yt_quan.to_astropy()
    yield assert_equal, yt_quan, YTQuantity.from_astropy(ap_quan)
    yield assert_equal, yt_quan, round_trip_quan

    # Full round trips through astropy and back.
    yield assert_array_equal, yt_arr, \
        YTArray.from_astropy(yt_arr.to_astropy())
    yield assert_equal, yt_quan, \
        YTQuantity.from_astropy(yt_quan.to_astropy())
def test_dimensionless_conversion():
    """Converting to 'Zsun' keeps the solar-metallicity base value."""
    quan = YTQuantity(1, 'Zsun')
    converted = quan.in_units('Zsun')
    quan.convert_to_units('Zsun')
    # Both the in-place and returned conversions must preserve it.
    yield assert_true, quan.units.base_value == metallicity_sun
    yield assert_true, converted.units.base_value == metallicity_sun
def test_unit_conversions():
    """
    Test operations that convert to different units or cast to ndarray
    """
    from yt.units.yt_array import YTQuantity
    from yt.units.unit_object import Unit

    km = YTQuantity(1, 'km')
    km_in_cm = km.in_units('cm')
    cm_unit = Unit('cm')
    kpc_unit = Unit('kpc')

    # in_units returns a converted copy with the new units.
    yield assert_equal, km_in_cm, km
    yield assert_equal, km_in_cm.in_cgs(), 1e5
    yield assert_equal, km_in_cm.in_mks(), 1e3
    yield assert_equal, km_in_cm.units, cm_unit

    # convert_to_units converts in place, keeping the underlying buffer.
    km_view = km.ndarray_view()
    km.convert_to_units('cm')
    assert_true(km_view.base is km.base)

    yield assert_equal, km, YTQuantity(1, 'km')
    yield assert_equal, km.in_cgs(), 1e5
    yield assert_equal, km.in_mks(), 1e3
    yield assert_equal, km.units, cm_unit

    km.convert_to_units('kpc')
    assert_true(km_view.base is km.base)

    # Conversions through kpc introduce roundoff; compare within ulps.
    yield assert_array_almost_equal_nulp, km, YTQuantity(1, 'km')
    yield assert_array_almost_equal_nulp, km.in_cgs(), YTQuantity(1e5, 'cm')
    yield assert_array_almost_equal_nulp, km.in_mks(), YTQuantity(1e3, 'm')
    yield assert_equal, km.units, kpc_unit

    # Casting to plain numpy arrays.
    yield assert_isinstance, km.to_ndarray(), np.ndarray
    yield assert_isinstance, km.ndarray_view(), np.ndarray

    # Compound units: force in cgs and mks.
    dyne = YTQuantity(1.0, 'dyne')
    yield assert_equal, dyne.in_cgs(), dyne
    yield assert_equal, dyne.in_cgs(), 1.0
    yield assert_equal, dyne.in_mks(), dyne
    yield assert_equal, dyne.in_mks(), 1e-5
    yield assert_equal, str(dyne.in_mks().units), 'kg*m/s**2'
    yield assert_equal, str(dyne.in_cgs().units), 'cm*g/s**2'

    # Compound units: energy density with mixed-system input.
    em3 = YTQuantity(1.0, 'erg/m**3')
    yield assert_equal, em3.in_cgs(), em3
    yield assert_equal, em3.in_cgs(), 1e-6
    yield assert_equal, em3.in_mks(), em3
    yield assert_equal, em3.in_mks(), 1e-7
    yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
    yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
def test_temperature_conversions():
    """
    Test conversions between various supported temperatue scales.

    Also ensure we only allow compound units with temperature scales that
    have a proper zero point.
    """
    from yt.units.unit_object import InvalidUnitOperation

    km = YTQuantity(1, 'km')
    balmy = YTQuantity(300, 'K')
    # 300 K expressed on the other supported scales.
    balmy_F = YTQuantity(80.33, 'degF')
    balmy_C = YTQuantity(26.85, 'degC')
    balmy_R = YTQuantity(540, 'R')

    assert_array_almost_equal(balmy.in_units('degF'), balmy_F)
    assert_array_almost_equal(balmy.in_units('degC'), balmy_C)
    assert_array_almost_equal(balmy.in_units('R'), balmy_R)

    # In-place conversions keep the same underlying buffer.
    balmy_view = balmy.ndarray_view()

    balmy.convert_to_units('degF')
    yield assert_true, balmy_view.base is balmy.base
    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)

    balmy.convert_to_units('degC')
    yield assert_true, balmy_view.base is balmy.base
    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_C)

    balmy.convert_to_units('R')
    yield assert_true, balmy_view.base is balmy.base
    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_R)

    balmy.convert_to_units('degF')
    yield assert_true, balmy_view.base is balmy.base
    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)

    # Offset temperature scales cannot form compound units.
    yield assert_raises, InvalidUnitOperation, np.multiply, balmy, km

    # Does CGS conversion from F to K work?
    yield assert_array_almost_equal, balmy.in_cgs(), YTQuantity(300, 'K')
def test_equivalencies():
    """Check each physical equivalency converts correctly and round-trips."""
    from yt.utilities.physical_constants import clight, mp, kboltz, hcgs, \
        mh, me, mass_sun_cgs, G, stefan_boltzmann_constant_cgs
    import yt.units as u

    # Mass-energy: E = m*c**2
    E = mp.to_equivalent("keV", "mass_energy")
    yield assert_equal, E, mp*clight*clight
    yield assert_allclose_units, mp, E.to_equivalent("g", "mass_energy")

    # Thermal: E = k_B*T
    T = YTQuantity(1.0e8, "K")
    E = T.to_equivalent("W*hr", "thermal")
    yield assert_equal, E, (kboltz*T).in_units("W*hr")
    yield assert_allclose_units, T, E.to_equivalent("K", "thermal")

    # Spectral: nu = c/lambda, E = h*nu
    l = YTQuantity(4000., "angstrom")
    nu = l.to_equivalent("Hz", "spectral")
    yield assert_equal, nu, clight/l
    E = hcgs*nu
    l2 = E.to_equivalent("angstrom", "spectral")
    yield assert_allclose_units, l, l2
    nu2 = clight/l2.in_units("cm")
    yield assert_allclose_units, nu, nu2
    E2 = nu2.to_equivalent("keV", "spectral")
    yield assert_allclose_units, E2, E.in_units("keV")

    # Sound-speed: c_s = sqrt(gamma*k_B*T/(mu*m_H)); first with the
    # default mu and gamma, then with explicit values.
    mu = 0.6
    gg = 5./3.
    c_s = T.to_equivalent("km/s", "sound_speed")
    yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
    yield assert_allclose_units, T, c_s.to_equivalent("K", "sound_speed")

    mu = 0.5
    gg = 4./3.
    c_s = T.to_equivalent("km/s", "sound_speed", mu=mu, gamma=gg)
    yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
    yield assert_allclose_units, T, c_s.to_equivalent("K", "sound_speed",
                                                      mu=mu, gamma=gg)

    # Lorentz: gamma = 1/sqrt(1 - v**2/c**2)
    v = 0.8*clight
    g = v.to_equivalent("dimensionless", "lorentz")
    g2 = YTQuantity(1./np.sqrt(1.-0.8*0.8), "dimensionless")
    yield assert_allclose_units, g, g2
    v2 = g2.to_equivalent("mile/hr", "lorentz")
    yield assert_allclose_units, v2, v.in_units("mile/hr")

    # Schwarzschild: R = 2*G*M/c**2
    R = mass_sun_cgs.to_equivalent("kpc", "schwarzschild")
    yield assert_equal, R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight)
    yield assert_allclose_units, mass_sun_cgs, \
        R.to_equivalent("g", "schwarzschild")

    # Compton: lambda = h/(m*c)
    l = me.to_equivalent("angstrom", "compton")
    yield assert_equal, l, hcgs/(me*clight)
    yield assert_allclose_units, me, l.to_equivalent("g", "compton")

    # Number density: n = rho/(mu*m_H), default mu then explicit mu.
    rho = mp/u.cm**3

    n = rho.to_equivalent("cm**-3", "number_density")
    yield assert_equal, n, rho/(mh*0.6)
    yield assert_allclose_units, rho, \
        n.to_equivalent("g/cm**3", "number_density")

    n = rho.to_equivalent("cm**-3", "number_density", mu=0.75)
    yield assert_equal, n, rho/(mh*0.75)
    yield assert_allclose_units, rho, \
        n.to_equivalent("g/cm**3", "number_density", mu=0.75)

    # Effective temperature: F = sigma*T**4
    T = YTQuantity(1.0e4, "K")
    F = T.to_equivalent("erg/s/cm**2", "effective_temperature")
    yield assert_equal, F, stefan_boltzmann_constant_cgs*T**4
    yield assert_allclose_units, T, \
        F.to_equivalent("K", "effective_temperature")