def test_internal_geographic_coordinates():
    # We're going to load up a simple AMR grid and check its volume
    # calculations and path length calculations.

    # Note that we are setting it up to have a depth of 1000 maximum, which
    # means our volume will be that of a shell 1000 wide, starting at r of
    # outer_radius - 1000.
    ds = fake_amr_ds(geometry="internal_geographic")
    ds.outer_radius = ds.quan(5000, "code_length")
    axes = ["latitude", "longitude", "depth"]
    for i, axis in enumerate(axes):
        dd = ds.all_data()
        fi = ("index", axis)
        fd = ("index", "d%s" % axis)
        ma = np.argmax(dd[fi])
        assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d)
        mi = np.argmin(dd[fi])
        assert_equal(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d)
        assert_equal(dd[fd].max(), (ds.domain_width / ds.domain_dimensions)[i].d)
    inner_r = ds.outer_radius - ds.domain_right_edge[2]
    outer_r = ds.outer_radius
    assert_equal(dd["index", "dtheta"], dd["index", "dlatitude"] * np.pi / 180.0)
    assert_equal(dd["index", "dphi"], dd["index", "dlongitude"] * np.pi / 180.0)
    assert_rel_equal(
        dd["cell_volume"].sum(dtype="float64"),
        (4.0 / 3.0) * np.pi * (outer_r**3 - inner_r**3),
        10,
    )
    assert_equal(dd["index", "path_element_depth"], dd["index", "ddepth"])
    assert_equal(dd["index", "path_element_depth"], dd["index", "dr"])
    # Note that latitude corresponds to theta, longitude to phi
    assert_equal(
        dd["index", "path_element_latitude"],
        dd["index", "r"] * dd["index", "dlatitude"] * np.pi / 180.0,
    )
    assert_equal(
        dd["index", "path_element_longitude"],
        dd["index", "r"]
        * dd["index", "dlongitude"]
        * np.pi / 180.0
        * np.sin((dd["index", "latitude"] + 90.0) * np.pi / 180.0),
    )
    # We also want to check that our radius is correct
    assert_equal(dd["index", "r"], -1.0 * dd["index", "depth"] + ds.outer_radius)
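
# A minimal sketch of the analytic volume the cell-volume sum above is checked
# against. The helper name `shell_volume` is hypothetical (not part of the
# test suite); it just restates the volume of a spherical shell.
def shell_volume(inner_r, outer_r):
    # V = (4/3) * pi * (R_outer**3 - R_inner**3)
    return (4.0 / 3.0) * np.pi * (outer_r**3 - inner_r**3)
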
def test_total_baryon_mass():
    # gather most recent data set
    sim = sim_dir_load(_pf_name, path=_dir_name, find_outputs=True)
    if sim.parameters['CosmologySimulationOmegaBaryonNow'] == 0.0:
        return
    sim.get_time_series()
    ds = sim[-1]
    data = ds.all_data()

    # sum masses
    Mstar = np.sum(
        data['particle_mass'][data['particle_type'] == 2].to('Msun'))
    Mgas = np.sum(data['cell_mass'].to('Msun'))

    output_data = {'masses': Mstar + Mgas}

    # save
    filename = "baryon_mass_results.h5"
    save_filename = os.path.join(_dir_name, filename)
    yt.save_as_dataset(ds, save_filename, output_data)

    compare_filename = os.path.join(test_data_dir, filename)
    if generate_answers:
        os.rename(save_filename, compare_filename)
        return

    ds_comp = yt.load(compare_filename)
    assert_rel_equal(output_data['masses'], ds_comp.data['masses'], tolerance)
def test_dark_matter_mass():
    # gather most recent data set
    sim = sim_dir_load(_pf_name, path=_dir_name, find_outputs=True)
    sim.get_time_series()
    ds = sim[-1]
    data = ds.all_data()

    # sum masses
    MDM = np.sum(
        data[('all', 'particle_mass')][data[('all', 'particle_type')] == 1].to('Msun'))

    output_data = {('data', 'mass'): MDM}

    # save
    filename = "DM_mass_results.h5"
    save_filename = os.path.join(_dir_name, filename)
    yt.save_as_dataset(ds, save_filename, output_data)

    compare_filename = os.path.join(test_data_dir, filename)
    if generate_answers:
        os.rename(save_filename, compare_filename)
        return

    ds_comp = yt.load(compare_filename)
    assert_rel_equal(output_data[('data', 'mass')],
                     ds_comp.data[('data', 'mass')], tolerance)
def test_data_collection():
    # We decompose in different ways
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(16, nprocs=nprocs)
        coll = ds.data_collection(ds.index.grids)
        crho = coll[("gas", "density")].sum(dtype="float64").to_ndarray()
        grho = np.sum(
            [g[("gas", "density")].sum(dtype="float64") for g in ds.index.grids],
            dtype="float64",
        )
        assert_rel_equal(np.array([crho]), np.array([grho]), 12)
        assert_equal(coll.size, ds.domain_dimensions.prod())
        for gi in range(ds.index.num_grids):
            grids = ds.index.grids[: gi + 1]
            coll = ds.data_collection(grids)
            crho = coll[("gas", "density")].sum(dtype="float64")
            grho = np.sum(
                [g[("gas", "density")].sum(dtype="float64") for g in grids],
                dtype="float64",
            )
            assert_rel_equal(np.array([crho]), np.array([grho]), 12)
            assert_equal(coll.size, sum(g.ActiveDimensions.prod() for g in grids))
def test_index_unop():
    np.random.seed(0x4D3D3D3)
    indices = np.arange(1000, dtype="int64")
    np.random.shuffle(indices)
    sizes = np.array([200, 50, 50, 100, 32, 32, 32, 32, 32, 64, 376], dtype="int64")
    for mi, ma, dtype in dtypes:
        for op, operation in operations:
            # Create a random set of values
            values = np.random.random(1000)
            if operation != "prod":
                values = values * ma + (ma - mi)
            if operation == "prod" and dtype.startswith("int"):
                values = values.astype(dtype)
                values[values != 0] = 1
                values[values == 0] = -1
            values = values.astype(dtype)
            out_values = index_unop(values, indices, sizes, operation)
            i = 0
            for j, v in enumerate(sizes):
                arr = values[indices[i : i + v]]
                if dtype == "float32":
                    # Numpy 1.9.1 changes the accumulator type to promote
                    assert_rel_equal(op(arr), out_values[j], 6)
                elif dtype == "float64":
                    # Numpy 1.9.1 changes the accumulator type to promote
                    assert_rel_equal(op(arr), out_values[j], 12)
                else:
                    assert_equal(op(arr), out_values[j])
                i += v
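
# The test above relies on module-level `dtypes` and `operations` tables that
# are not shown in this excerpt: (mi, ma, dtype) value ranges and
# (NumPy reduction, index_unop operation name) pairs. A hypothetical sketch of
# their shape, for illustration only; the real definitions live in the test
# module alongside test_index_unop.
example_dtypes = (
    (16, 32, "int32"),
    (16, 32, "int64"),
    (0.0, 1.0, "float32"),
    (0.0, 1.0, "float64"),
)
example_operations = (
    (np.sum, "sum"),
    (np.prod, "prod"),
    (np.min, "min"),
    (np.max, "max"),
)
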
def compare(self, new_result, old_result):
    err_msg = "All field values for %s not equal." % self.field
    if self.decimals is None:
        assert_equal(new_result, old_result, err_msg=err_msg, verbose=True)
    else:
        assert_rel_equal(
            new_result, old_result, self.decimals, err_msg=err_msg, verbose=True
        )
def test_set_width_nonequal(self):
    self.slc.set_width((0.5, 0.8))
    assert_rel_equal(
        [self.slc.xlim, self.slc.ylim, self.slc.width],
        [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)],
        15,
    )
    assert_true(self.slc._axes_unit_names is None)
def test_z_t_analytic():
    """
    Test z/t conversions against analytic solutions.
    """
    cosmos = (
        {"hubble_constant": 0.7, "omega_matter": 0.3, "omega_lambda": 0.7},
        {"hubble_constant": 0.7, "omega_matter": 1.0, "omega_lambda": 0.0},
        {"hubble_constant": 0.7, "omega_matter": 0.3, "omega_lambda": 0.0},
    )

    for cosmo in cosmos:
        omega_curvature = 1 - cosmo["omega_matter"] - cosmo["omega_lambda"]
        co = Cosmology(omega_curvature=omega_curvature, **cosmo)

        # random sample in log(a) from -6 to 6
        my_random = np.random.RandomState(10132324)
        la = 12 * my_random.random_sample(1000) - 6
        z = 1 / np.power(10, la) - 1

        t_an = t_from_z_analytic(z, **cosmo).to("Gyr")
        t_co = co.t_from_z(z).to("Gyr")
        assert_rel_equal(
            t_an,
            t_co,
            4,
            err_msg=f"t_from_z does not match analytic version for cosmology {cosmo}.",
        )

        # random sample in log(t/t0) from -3 to 1
        t0 = np.power(10, 4 * my_random.random_sample(1000) - 3)
        t = (t0 / co.hubble_constant).to("Gyr")

        z_an = z_from_t_analytic(t, **cosmo)
        z_co = co.z_from_t(t)

        # compare scale factors since z approaches 0
        assert_rel_equal(
            1 / (1 + z_an),
            1 / (1 + z_co),
            5,
            err_msg=f"z_from_t does not match analytic version for cosmology {cosmo}.",
        )
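
# For reference, the analytic solution being tested against reduces, in the
# flat case (omega_matter + omega_lambda = 1), to the standard closed form
# below. This is a sketch for orientation only (unitless, time in units of
# 1/H0), not the actual body of t_from_z_analytic, which also covers the open
# and Einstein-de Sitter cases exercised above.
def t_from_z_flat_lcdm(z, hubble_constant, omega_matter, omega_lambda):
    # t(z) = 2 / (3 H0 sqrt(omega_lambda))
    #        * arcsinh(sqrt(omega_lambda / omega_matter) * a**1.5),  a = 1/(1+z)
    a = 1.0 / (1.0 + z)
    return (
        2.0
        / (3.0 * hubble_constant * np.sqrt(omega_lambda))
        * np.arcsinh(np.sqrt(omega_lambda / omega_matter) * a**1.5)
    )
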
def test_hubble_time():
    """
    Make sure hubble_time and t_from_z functions agree.
    """
    for i in range(10):
        co = Cosmology()
        # random sample over interval (-1,100]
        z = -101 * np.random.random() + 100
        assert_rel_equal(co.hubble_time(z), co.t_from_z(z), 5)
def test_average():
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(16, nprocs=nprocs, fields=("density",))
        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
            my_mean = ad.quantities["WeightedAverageQuantity"]("density", "ones")
            assert_rel_equal(my_mean, ad["density"].mean(), 12)

            my_mean = ad.quantities["WeightedAverageQuantity"]("density", "cell_mass")
            a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
            assert_rel_equal(my_mean, a_mean, 12)
def compare(self, new_result, old_result):
    err_msg = f"All field values for {self.field} not equal."
    if hasattr(new_result, "d"):
        new_result = new_result.d
    if hasattr(old_result, "d"):
        old_result = old_result.d
    if self.decimals is None:
        assert_equal(new_result, old_result, err_msg=err_msg, verbose=True)
    else:
        assert_rel_equal(
            new_result, old_result, self.decimals, err_msg=err_msg, verbose=True
        )
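
# Why the `.d` dance above works: yt's unit-aware arrays (YTArray/YTQuantity)
# expose their raw NumPy data through the `.d` attribute, so the comparison is
# performed on plain ndarrays regardless of units. A minimal illustration,
# assuming yt's usual import path:
from yt.units.yt_array import YTArray

arr = YTArray([1.0, 2.0], "g")             # unit-aware array
assert_equal(arr.d, np.array([1.0, 2.0]))  # `.d` strips units to a bare ndarray
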
def test_z_t_conversion():
    """
    Make sure t_from_z and z_from_t are consistent.
    """
    for i in range(10):
        co = Cosmology()
        # random sample over interval (-1,100]
        z1 = -101 * np.random.random() + 100
        t = co.t_from_z(z1)
        z2 = co.z_from_t(t)
        assert_rel_equal(z1, z2, 10)
def test_z_t_roundtrip():
    """
    Make sure t_from_z and z_from_t are consistent.
    """
    co = Cosmology()
    # random sample in log(a) from -6 to 6
    my_random = np.random.RandomState(6132305)
    la = 12 * my_random.random_sample(10000) - 6
    z1 = 1 / np.power(10, la) - 1
    t = co.t_from_z(z1)
    z2 = co.z_from_t(t)
    assert_rel_equal(z1, z2, 4)
def test_streamlines():
    # Renamed to match the body: this exercises Streamlines, not covering
    # grids, and the stray `return` that made the test a no-op is removed.
    cs = np.mgrid[0.47:0.53:2j, 0.47:0.53:2j, 0.47:0.53:2j]
    cs = np.array([a.ravel() for a in cs]).T
    length = (1.0 / 128) * 16  # 16 half-widths of a cell
    # We decompose in different ways
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(64, nprocs=nprocs, fields=_fields)
        streams = Streamlines(ds, cs, length=length)
        streams.integrate_through_volume()
        for path in (streams.path(i) for i in range(8)):
            assert_rel_equal(path['dts'].sum(), 1.0, 14)
            assert_equal(np.all(path['t'] <= (1.0 + 1e-10)), True)
            path["density"]
def test_max_density_halo_quantities():
    ds = yt.load(os.path.join(_dir_name, 'RD0009/RD0009'))

    # Find the point of maximum density, center a sphere of radius
    # 1 Mpc around it, and sum the masses inside
    val, pos = ds.find_max('Density')
    sp = ds.sphere(pos, (1000., 'kpc'))
    ct = sp['creation_time']
    dm = (ct < 0)
    dm_mass = np.sum(sp['particle_mass'][dm]).in_units('Msun')
    gas_mass = np.sum(sp['cell_mass'].in_units('Msun'))

    # Also look at the radial profiles of density and temperature
    # within these spheres. The bin size is chosen to make the profiles
    # smooth and for each bin to be larger than the cell size.
    ptest0 = yt.create_profile(sp, "radius", "density", n_bins=[20])
    ptest1 = yt.create_profile(sp, "radius", "temperature", n_bins=[20])

    # Save the quantities to be compared
    data = {
        "dm_mass": dm_mass,
        "gas_mass": gas_mass,
        "max_position": pos,
        "density_profile": ptest0['density'],
        "temperature_profile": ptest1['temperature'],
    }

    # save your results file
    filename = "max_density_halo_quantities.h5"
    save_filename = os.path.join(_dir_name, filename)
    yt.save_as_dataset(ds, save_filename, data)

    compare_filename = os.path.join(test_data_dir, filename)
    if generate_answers:
        os.rename(save_filename, compare_filename)
        return

    ds_comp = yt.load(compare_filename)
    assert_rel_equal(data["dm_mass"], ds_comp.data["dm_mass"], tolerance)
    assert_rel_equal(data["gas_mass"], ds_comp.data["gas_mass"], tolerance)
    assert_rel_equal(data["max_position"], ds_comp.data["max_position"], tolerance)
    assert_rel_equal(data["density_profile"],
                     ds_comp.data["density_profile"], tolerance)
    assert_rel_equal(data["temperature_profile"],
                     ds_comp.data["temperature_profile"], tolerance)
def test_hmf():
    es = sim_dir_load(_pf_name, path=_dir_name)
    es.get_time_series()
    ds = es[-1]
    hc = HaloCatalog(
        data_ds=ds, finder_method='fof',
        output_dir=os.path.join(_dir_name, "halo_catalogs/catalog"))
    hc.create()
    masses = hc.data_source['particle_mass'].in_units('Msun')
    h = ds.hubble_constant
    mtot = np.log10(masses * 1.2) - np.log10(h)
    masses_sim = np.sort(mtot)
    sim_volume = ds.domain_width.in_units('Mpccm').prod()
    n_cumulative_sim = np.arange(len(mtot), 0, -1)
    masses_sim, unique_indices = np.unique(masses_sim, return_index=True)
    n_cumulative_sim = n_cumulative_sim[unique_indices] / sim_volume

    filename = 'hmf.h5'
    save_filename = os.path.join(_dir_name, filename)
    data = {'masses': masses_sim, 'n_sim': n_cumulative_sim}
    yt.save_as_dataset(ds, save_filename, data)

    # make a plot
    fig = plt.figure(figsize=(8, 8))
    plt.semilogy(masses_sim, n_cumulative_sim, '-')
    # Use raw strings so the LaTeX backslashes are not treated as escapes.
    plt.ylabel(r'Cumulative Halo Number Density $\mathrm{Mpc}^{-3}$', fontsize=16)
    plt.xlabel(r'log Mass/$\mathrm{M}_{\odot}$', fontsize=16)
    plt.tick_params(labelsize=16)
    plt.savefig(os.path.join(_dir_name, 'hmf.png'), format='png')

    compare_filename = os.path.join(test_data_dir, filename)
    if generate_answers:
        os.rename(save_filename, compare_filename)
        return

    # do the comparison
    ds_comp = yt.load(compare_filename)

    # assert quality to 8 decimals
    assert_rel_equal(data['masses'], ds_comp.data['masses'], 8)
    assert_rel_equal(data['n_sim'], ds_comp.data['n_sim'], 8)
def test_phase():
    es = sim_dir_load(_pf_name, path=_dir_name)
    es.get_time_series(redshifts=[0])
    ds = es[-1]
    ad = ds.all_data()
    profile = ad.profile([("gas", "density")],
                         [("gas", "temperature"), ("gas", "cell_mass")])
    profile1 = ad.profile([("gas", "density")],
                          [("gas", "temperature"), ("gas", "cooling_time")],
                          weight_field=('gas', 'cell_mass'))
    density = profile.x
    temperature = profile[('gas', 'temperature')]
    cooling_time = profile1[('gas', 'cooling_time')]
    cell_mass = profile[('gas', 'cell_mass')]

    filename = 'phase_data.h5'
    save_filename = os.path.join(_dir_name, filename)
    data = {('data', 'density'): density,
            ('data', 'temperature'): temperature,
            ('data', 'cooling_time'): cooling_time,
            ('data', 'cell_mass'): cell_mass}
    yt.save_as_dataset(ds, save_filename, data)

    pp = yt.PhasePlot(ad, ('gas', 'density'), ('gas', 'temperature'),
                      ('gas', 'cell_mass'))
    pp.set_unit(('gas', 'cell_mass'), 'Msun')
    pp.save(_dir_name)
    pp1 = yt.PhasePlot(ad, ('gas', 'density'), ('gas', 'temperature'),
                       ('gas', 'cooling_time'), weight_field=('gas', 'cell_mass'))
    pp1.save(_dir_name)

    compare_filename = os.path.join(test_data_dir, filename)
    if generate_answers:
        os.rename(save_filename, compare_filename)
        return

    # do the comparison
    ds_comp = yt.load(compare_filename)

    # assert quality to 8 decimals
    assert_rel_equal(data[('data', 'density')],
                     ds_comp.data[('data', 'density')], 8)
    assert_rel_equal(data[('data', 'temperature')],
                     ds_comp.data[('data', 'temperature')], 8)
    assert_rel_equal(data[('data', 'cooling_time')],
                     ds_comp.data[('data', 'cooling_time')], 8)
    assert_rel_equal(data[('data', 'cell_mass')],
                     ds_comp.data[('data', 'cell_mass')], 8)
def test_variance():
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(16, nprocs=nprocs, fields=("density",))
        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
            my_std, my_mean = ad.quantities["WeightedVariance"]("density", "ones")
            assert_rel_equal(my_mean, ad["density"].mean(), 12)
            assert_rel_equal(my_std, ad["density"].std(), 12)

            my_std, my_mean = ad.quantities["WeightedVariance"]("density", "cell_mass")
            a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
            assert_rel_equal(my_mean, a_mean, 12)
            a_std = np.sqrt(
                (ad["cell_mass"] * (ad["density"] - a_mean) ** 2).sum()
                / ad["cell_mass"].sum()
            )
            assert_rel_equal(my_std, a_std, 12)
def test_ray():
    for nproc in [1, 2, 4, 8]:
        ds = fake_random_ds(64, nprocs=nproc)
        dx = (ds.domain_right_edge - ds.domain_left_edge) / ds.domain_dimensions
        # Three we choose, to get varying vectors, and ten random
        pp1 = np.random.random((3, 13))
        pp2 = np.random.random((3, 13))
        pp1[:, 0] = [0.1, 0.2, 0.3]
        pp2[:, 0] = [0.8, 0.1, 0.4]
        pp1[:, 1] = [0.9, 0.2, 0.3]
        pp2[:, 1] = [0.8, 0.1, 0.4]
        pp1[:, 2] = [0.9, 0.2, 0.9]
        pp2[:, 2] = [0.8, 0.1, 0.4]
        unitary = ds.arr(1.0, "")
        for i in range(pp1.shape[1]):
            p1 = ds.arr(pp1[:, i] + 1e-8 * np.random.random(3), "code_length")
            p2 = ds.arr(pp2[:, i] + 1e-8 * np.random.random(3), "code_length")

            my_ray = ds.ray(p1, p2)
            assert_rel_equal(my_ray["dts"].sum(), unitary, 14)
            ray_cells = my_ray["dts"] > 0

            # find cells intersected by the ray
            my_all = ds.all_data()

            dt = np.abs(dx / (p2 - p1))
            tin = uconcatenate(
                [
                    [(my_all[("index", "x")] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
                    [(my_all[("index", "y")] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
                    [(my_all[("index", "z")] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]],
                ]
            )
            tout = uconcatenate(
                [
                    [(my_all[("index", "x")] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
                    [(my_all[("index", "y")] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
                    [(my_all[("index", "z")] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]],
                ]
            )
            tin = tin.max(axis=0)
            tout = tout.min(axis=0)
            my_cells = (tin < tout) & (tin < 1) & (tout > 0)

            assert_equal(ray_cells.sum(), my_cells.sum())
            assert_rel_equal(
                my_ray[("gas", "density")][ray_cells].sum(),
                my_all[("gas", "density")][my_cells].sum(),
                14,
            )
            assert_rel_equal(my_ray["dts"].sum(), unitary, 14)
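
# The tin/tout construction above is the slab method for ray-box intersection,
# written per cell over the whole grid at once. A standalone single-box sketch
# (hypothetical helper, not part of the test suite); note the test perturbs
# ray endpoints by 1e-8 so no component of (p2 - p1) is exactly zero:
def ray_intersects_box(p1, p2, box_lo, box_hi):
    # Parametrize the ray as p(t) = p1 + t * (p2 - p1), t in [0, 1].
    d = p2 - p1
    t_lo = (box_lo - p1) / d
    t_hi = (box_hi - p1) / d
    tin = np.minimum(t_lo, t_hi).max()   # latest entry time over the three axes
    tout = np.maximum(t_lo, t_hi).min()  # earliest exit time over the three axes
    return (tin < tout) & (tin < 1) & (tout > 0)
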
def test_standard_deviation():
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(16, nprocs=nprocs, fields=("density",),
                            units=("g/cm**3",))
        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
            my_std, my_mean = ad.quantities["WeightedStandardDeviation"](
                ("gas", "density"), ("index", "ones"))
            assert_rel_equal(my_mean, ad[("gas", "density")].mean(), 12)
            assert_rel_equal(my_std, ad[("gas", "density")].std(), 12)

            my_std, my_mean = ad.quantities["WeightedStandardDeviation"](
                ("gas", "density"), ("gas", "cell_mass"))
            a_mean = (ad[("gas", "density")] * ad[("gas", "cell_mass")]).sum() \
                / ad[("gas", "cell_mass")].sum()
            assert_rel_equal(my_mean, a_mean, 12)
            a_std = np.sqrt(
                (ad[("gas", "cell_mass")] * (ad[("gas", "density")] - a_mean) ** 2).sum()
                / ad[("gas", "cell_mass")].sum()
            )
            assert_rel_equal(my_std, a_std, 12)
def assert_array_rel_equal(a1, a2, decimals=16, **kwargs):
    """
    Wraps assert_rel_equal, but decimals is a keyword argument.
    """
    assert_rel_equal(a1, a2, decimals, **kwargs)
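
# Minimal usage sketch: behaves exactly like assert_rel_equal, but the
# precision can be supplied (or omitted) as a keyword, which is convenient for
# functools.partial and parametrized comparison helpers.
assert_array_rel_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]), decimals=10)
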
def test_profiles():
    ds = fake_random_ds(64, nprocs=8, fields=_fields, units=_units)
    nv = ds.domain_dimensions.prod()
    dd = ds.all_data()
    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
        ["density", "temperature", "dinosaurs"])
    rt, tt, dt = dd.quantities["TotalQuantity"](
        ["density", "temperature", "dinosaurs"])

    e1, e2 = 0.9, 1.1
    for nb in [8, 16, 32, 64]:
        for input_units in ['mks', 'cgs']:
            for ex in [rmi, rma, tmi, tma, dmi, dma]:
                getattr(ex, 'convert_to_%s' % input_units)()
            # We log all the fields or don't log 'em all. No need to do them
            # individually.
            for lf in [True, False]:
                direct_profile = Profile1D(
                    dd, "density", nb, rmi * e1, rma * e2, lf, weight_field=None)
                direct_profile.add_fields(["ones", "temperature"])

                indirect_profile_s = create_profile(
                    dd, "density", ["ones", "temperature"], n_bins=nb,
                    extrema={'density': (rmi * e1, rma * e2)},
                    logs={'density': lf}, weight_field=None)

                indirect_profile_t = create_profile(
                    dd, ("gas", "density"),
                    [("index", "ones"), ("gas", "temperature")], n_bins=nb,
                    extrema={'density': (rmi * e1, rma * e2)},
                    logs={'density': lf}, weight_field=None)

                for p1d in [direct_profile, indirect_profile_s,
                            indirect_profile_t]:
                    assert_equal(p1d["index", "ones"].sum(), nv)
                    assert_rel_equal(tt, p1d["gas", "temperature"].sum(), 7)

                p2d = Profile2D(dd, "density", nb, rmi * e1, rma * e2, lf,
                                "temperature", nb, tmi * e1, tma * e2, lf,
                                weight_field=None)
                p2d.add_fields(["ones", "temperature"])
                assert_equal(p2d["ones"].sum(), nv)
                assert_rel_equal(tt, p2d["temperature"].sum(), 7)

                p3d = Profile3D(dd, "density", nb, rmi * e1, rma * e2, lf,
                                "temperature", nb, tmi * e1, tma * e2, lf,
                                "dinosaurs", nb, dmi * e1, dma * e2, lf,
                                weight_field=None)
                p3d.add_fields(["ones", "temperature"])
                assert_equal(p3d["ones"].sum(), nv)
                assert_rel_equal(tt, p3d["temperature"].sum(), 7)

        p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False, weight_field=None)
        p1d.add_fields("ones")
        av = nv / nb
        assert_equal(p1d["ones"], np.ones(nb) * av)

        # We re-bin ones with a weight now
        p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
                        weight_field="temperature")
        p1d.add_fields(["ones"])
        assert_equal(p1d["ones"], np.ones(nb))

        # Verify we can access "ones" after adding a new field
        # See issue 988
        p1d.add_fields(["density"])
        assert_equal(p1d["ones"], np.ones(nb))

        p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
                        "y", nb, 0.0, 1.0, False,
                        weight_field=None)
        p2d.add_fields("ones")
        av = nv / nb**2
        assert_equal(p2d["ones"], np.ones((nb, nb)) * av)

        # We re-bin ones with a weight now
        p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
                        "y", nb, 0.0, 1.0, False,
                        weight_field="temperature")
        p2d.add_fields(["ones"])
        assert_equal(p2d["ones"], np.ones((nb, nb)))

        p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
                        "y", nb, 0.0, 1.0, False,
                        "z", nb, 0.0, 1.0, False,
                        weight_field=None)
        p3d.add_fields("ones")
        av = nv / nb**3
        assert_equal(p3d["ones"], np.ones((nb, nb, nb)) * av)

        # We re-bin ones with a weight now
        p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
                        "y", nb, 0.0, 1.0, False,
                        "z", nb, 0.0, 1.0, False,
                        weight_field="temperature")
        p3d.add_fields(["ones"])
        assert_equal(p3d["ones"], np.ones((nb, nb, nb)))

    p2d = create_profile(dd, ('gas', 'density'), ('gas', 'temperature'),
                         weight_field=('gas', 'cell_mass'),
                         extrema={'density': (None, rma * e2)})
    assert_equal(p2d.x_bins[0], rmi - np.spacing(rmi))
    assert_equal(p2d.x_bins[-1], rma * e2)

    p2d = create_profile(dd, ('gas', 'density'), ('gas', 'temperature'),
                         weight_field=('gas', 'cell_mass'),
                         extrema={'density': (rmi * e2, None)})
    assert_equal(p2d.x_bins[0], rmi * e2)
    assert_equal(p2d.x_bins[-1], rma + np.spacing(rma))
def test_projection(pf):
    fns = []
    for nprocs in [8, 1]:
        # We want to test both 1 proc and 8 procs, to make sure that
        # parallelism isn't broken
        fields = ("density", "temperature", "velocity_x", "velocity_y", "velocity_z")
        units = ("g/cm**3", "K", "cm/s", "cm/s", "cm/s")
        ds = fake_random_ds(
            64, fields=fields, units=units, nprocs=nprocs, length_unit=LENGTH_UNIT
        )
        dims = ds.domain_dimensions
        xn, yn, zn = ds.domain_dimensions
        xi, yi, zi = ds.domain_left_edge.to_ndarray() + 1.0 / (
            ds.domain_dimensions * 2
        )
        xf, yf, zf = ds.domain_right_edge.to_ndarray() - 1.0 / (
            ds.domain_dimensions * 2
        )
        dd = ds.all_data()
        coords = np.mgrid[xi : xf : xn * 1j, yi : yf : yn * 1j, zi : zf : zn * 1j]
        uc = [np.unique(c) for c in coords]
        # test if projections inherit the field parameters of their data sources
        dd.set_field_parameter("bulk_velocity", np.array([0, 1, 2]))
        proj = ds.proj(("gas", "density"), 0, data_source=dd)
        assert_equal(
            dd.field_parameters["bulk_velocity"],
            proj.field_parameters["bulk_velocity"],
        )

        # Some simple projection tests with single grids
        for ax, an in enumerate("xyz"):
            xax = ds.coordinates.x_axis[ax]
            yax = ds.coordinates.y_axis[ax]
            for wf in [("gas", "density"), None]:
                proj = ds.proj(
                    [("index", "ones"), ("gas", "density")], ax, weight_field=wf
                )
                if wf is None:
                    assert_equal(
                        proj[("index", "ones")].sum(),
                        LENGTH_UNIT * proj[("index", "ones")].size,
                    )
                    assert_equal(proj[("index", "ones")].min(), LENGTH_UNIT)
                    assert_equal(proj[("index", "ones")].max(), LENGTH_UNIT)
                else:
                    assert_equal(
                        proj[("index", "ones")].sum(), proj[("index", "ones")].size
                    )
                    assert_equal(proj[("index", "ones")].min(), 1.0)
                    assert_equal(proj[("index", "ones")].max(), 1.0)
                assert_equal(np.unique(proj["px"]), uc[xax])
                assert_equal(np.unique(proj["py"]), uc[yax])
                assert_equal(np.unique(proj["pdx"]), 1.0 / (dims[xax] * 2.0))
                assert_equal(np.unique(proj["pdy"]), 1.0 / (dims[yax] * 2.0))
                plots = [proj.to_pw(fields=("gas", "density")), proj.to_pw()]
                for pw in plots:
                    for p in pw.plots.values():
                        tmpfd, tmpname = tempfile.mkstemp(suffix=".png")
                        os.close(tmpfd)
                        p.save(name=tmpname)
                        fns.append(tmpname)
                frb = proj.to_frb((1.0, "unitary"), 64)
                for proj_field in [
                    ("index", "ones"),
                    ("gas", "density"),
                    ("gas", "temperature"),
                ]:
                    fi = ds._get_field_info(proj_field)
                    assert_equal(frb[proj_field].info["data_source"], proj.__str__())
                    assert_equal(frb[proj_field].info["axis"], ax)
                    assert_equal(frb[proj_field].info["field"], str(proj_field))
                    field_unit = Unit(fi.units)
                    if wf is not None:
                        assert_equal(
                            frb[proj_field].units,
                            Unit(field_unit, registry=ds.unit_registry),
                        )
                    else:
                        if frb[proj_field].units.is_code_unit:
                            proj_unit = "code_length"
                        else:
                            proj_unit = "cm"
                        if field_unit != "" and field_unit != Unit():
                            proj_unit = f"({field_unit}) * {proj_unit}"
                        assert_equal(
                            frb[proj_field].units,
                            Unit(proj_unit, registry=ds.unit_registry),
                        )
                    assert_equal(frb[proj_field].info["xlim"], frb.bounds[:2])
                    assert_equal(frb[proj_field].info["ylim"], frb.bounds[2:])
                    assert_equal(frb[proj_field].info["center"], proj.center)
                    if wf is None:
                        assert_equal(frb[proj_field].info["weight_field"], wf)
                    else:
                        assert_equal(
                            frb[proj_field].info["weight_field"],
                            proj.data_source._determine_fields(wf)[0],
                        )
            # wf == None
            assert_equal(wf, None)
            v1 = proj[("gas", "density")].sum()
            v2 = (dd[("gas", "density")] * dd[("index", f"d{an}")]).sum()
            assert_rel_equal(v1, v2.in_units(v1.units), 10)
    teardown_func(fns)
def compare(self, new_result, old_result):
    assert len(new_result) == len(old_result)
    for k in new_result:
        assert k in old_result
    for k in new_result:
        assert_rel_equal(new_result[k], old_result[k], 10)