Exemplo n.º 1
0
def test_magnetic_fields():
    """Check magnetic-field derived-field units and magnetic energy for
    stream datasets loaded under cgs, mks, and code unit systems.

    The same random field data is loaded three times; the derived fields
    must report the base magnetic unit of each system, and the magnetic
    energy computed by hand must match the ``magnetic_energy`` field.
    """
    ddims = (16, 16, 16)
    data1 = {
        "magnetic_field_x": (np.random.random(size=ddims), "T"),
        "magnetic_field_y": (np.random.random(size=ddims), "T"),
        "magnetic_field_z": (np.random.random(size=ddims), "T"),
    }
    # Same field values expressed in gauss (1 T == 1.0e4 G)
    data2 = {}
    for field in data1:
        data2[field] = (data1[field][0] * 1.0e4, "gauss")

    ds1 = load_uniform_grid(data1, ddims, unit_system="cgs")
    ds2 = load_uniform_grid(data2, ddims, unit_system="mks")
    # For this test dataset, code units are cgs units
    ds3 = load_uniform_grid(data2, ddims, unit_system="code")

    # Force index creation so derived fields are registered
    ds1.index
    ds2.index
    ds3.index

    dd1 = ds1.all_data()
    dd2 = ds2.all_data()
    dd3 = ds3.all_data()

    assert ds1.fields.gas.magnetic_field_strength.units == "G"
    assert ds1.fields.gas.magnetic_field_poloidal.units == "G"
    assert ds1.fields.gas.magnetic_field_toroidal.units == "G"
    assert ds2.fields.gas.magnetic_field_strength.units == "T"
    assert ds2.fields.gas.magnetic_field_poloidal.units == "T"
    assert ds2.fields.gas.magnetic_field_toroidal.units == "T"
    assert ds3.fields.gas.magnetic_field_strength.units == "code_magnetic"
    assert ds3.fields.gas.magnetic_field_poloidal.units == "code_magnetic"
    assert ds3.fields.gas.magnetic_field_toroidal.units == "code_magnetic"

    # cgs (Gaussian): magnetic energy density is B**2 / (8*pi)
    emag1 = (dd1["magnetic_field_x"]**2 + dd1["magnetic_field_y"]**2 +
             dd1["magnetic_field_z"]**2) / (8.0 * np.pi)
    emag1.convert_to_units("dyne/cm**2")

    # SI: magnetic energy density is B**2 / (2*mu_0)
    emag2 = (dd2["magnetic_field_x"]**2 + dd2["magnetic_field_y"]**2 +
             dd2["magnetic_field_z"]**2) / (2.0 * mu_0)
    emag2.convert_to_units("Pa")

    # Code units are cgs here, so the Gaussian form B**2/(8*pi) applies;
    # dividing by 2*mu_0 (as a previous revision did) mixes conventions.
    emag3 = (dd3["magnetic_field_x"]**2 + dd3["magnetic_field_y"]**2 +
             dd3["magnetic_field_z"]**2) / (8.0 * np.pi)
    emag3.convert_to_units("code_pressure")

    assert_almost_equal(emag1, dd1["magnetic_energy"])
    assert_almost_equal(emag2, dd2["magnetic_energy"])
    assert_almost_equal(emag3, dd3["magnetic_energy"])

    assert str(emag1.units) == str(dd1["magnetic_energy"].units)
    assert str(emag2.units) == str(dd2["magnetic_energy"].units)
    assert str(emag3.units) == str(dd3["magnetic_energy"].units)

    # All three must agree once converted to a common (cgs) system
    assert_almost_equal(emag1.in_cgs(), emag2.in_cgs())
    assert_almost_equal(emag1.in_cgs(), emag3.in_cgs())
Exemplo n.º 2
0
def test_ppv_nothermalbroad():
    """With thermal broadening disabled, the mean PPV spectrum must match
    the analytic Gaussian set by the imposed velocity distribution."""

    np.random.seed(seed=0x4D3D3D3)

    dims = (16, 16, 128)
    v_shift = 1.0e6 * u.cm / u.s  # bulk line-of-sight velocity
    sigma_v = 2.0e6 * u.cm / u.s  # Gaussian velocity dispersion
    data = {
        "density": (np.ones(dims), "g/cm**3"),
        "velocity_x": (np.zeros(dims), "cm/s"),
        "velocity_y": (np.zeros(dims), "cm/s"),
        # Only the z-component carries the shifted Gaussian distribution
        "velocity_z": (
            np.random.normal(loc=v_shift.v, scale=sigma_v.v, size=dims),
            "cm/s",
        ),
    }

    ds = load_uniform_grid(data, dims)

    # 128 velocity channels spanning +/-100 km/s along z, no thermal term
    cube = PPVCube(
        ds,
        "z",
        ("stream", "density"),
        (-100.0, 100.0, 128, "km/s"),
        dims=16,
        thermal_broad=False,
    )

    dv = cube.dv  # channel width
    # Expected line width is sqrt(2)*sigma_v (purely kinematic broadening)
    v_noth = np.sqrt(2) * (sigma_v).in_units("km/s")
    a = cube.data.mean(axis=(0, 1)).v
    # Analytic normalized Gaussian per channel.  NOTE(review): the
    # (cube.vmid + v_shift) sign suggests the cube's spectral axis is
    # flipped relative to velocity_z — confirm against PPVCube docs.
    b = (dv * np.exp(-(((cube.vmid + v_shift) / v_noth)**2)) /
         (np.sqrt(np.pi) * v_noth))

    assert_allclose_units(a, b, atol=5.0e-3)
Exemplo n.º 3
0
def test_stream_non_cartesian_particles(loader):
    """Load a spherical stream dataset carrying particles and verify that
    the particle coordinate fields round-trip through yt unchanged."""
    tol = 1e-6
    rr, tt, pp = np.mgrid[0.0:1.0 - tol:64j, 0.0:np.pi - tol:64j,
                          0.0:2.0 * np.pi - tol:64j]
    np.random.seed(0x4D3D3D3)
    picks = np.random.randint(0, 64 * 64 * 64, size=1000)

    pos_r = rr.ravel()[picks]
    pos_theta = tt.ravel()[picks]
    pos_phi = pp.ravel()[picks]

    fields = {
        "density": rr,
        "temperature": pp,
        "entropy": pp,
        "particle_position_r": pos_r,
        "particle_position_theta": pos_theta,
        "particle_position_phi": pos_phi,
    }
    domain = np.array([[0.0, 1.0], [0.0, np.pi], [0.0, 2.0 * np.pi]])
    ds = load_uniform_grid(
        fields,
        (64, 64, 64),
        bbox=domain,
        geometry="spherical",
    )

    region = ds.all_data()
    assert_equal(region["all", "particle_position_r"].v, pos_r)
    assert_equal(region["all", "particle_position_phi"].v, pos_phi)
    assert_equal(region["all", "particle_position_theta"].v, pos_theta)
Exemplo n.º 4
0
def test_on_off_compare():
    """An on-axis z slice and an off-axis slice along [0, 0, 1] must yield
    identical fixed-resolution buffers, before and after resizing."""
    # fake density field that varies in the x-direction only
    rho = 1.0 + np.arange(32**3, dtype=np.float64).reshape(32, 32, 32) / 32**2
    ds = load_uniform_grid(
        dict(density=(rho, "g/cm**3")),
        rho.shape,
        length_unit="Mpc",
        bbox=np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]),
        nprocs=64,
    )

    sl_on = SlicePlot(ds, "z", [("gas", "density")])

    normal = [0, 0, 1]
    up = [0, 1, 0]
    sl_off = OffAxisSlicePlot(ds,
                              normal, ("gas", "density"),
                              center=[0, 0, 0],
                              north_vector=up)

    assert_array_almost_equal(sl_on.frb[("gas", "density")],
                              sl_off.frb[("gas", "density")])

    # A non-square buffer must not break the agreement either.
    for plot in (sl_on, sl_off):
        plot.set_buff_size((800, 400))
        plot._recreate_frb()

    assert_array_almost_equal(sl_on.frb[("gas", "density")],
                              sl_off.frb[("gas", "density")])
Exemplo n.º 5
0
def test_ds_arr_invariance_under_projection_plot(tmp_path):
    """annotate_line must not mutate the unyt arrays handed to it."""
    field_vals = np.random.random((10, 10, 10))
    ds = load_uniform_grid(
        {("gas", "density"): (field_vals, "g*cm**(-3)")},
        field_vals.shape,
        length_unit="kpc",
        bbox=np.array([[-100, 100], [-100, 100], [-100, 100]]),
    )

    start = ds.arr(np.array((0, 0, -0.5)), "unitary")
    end = ds.arr(np.array((0, 0, 0.5)), "unitary")

    # Snapshot both endpoints before plotting
    start_before = start.copy()
    end_before = end.copy()

    plot = ProjectionPlot(ds, 0, "number_density")
    plot.annotate_line(start, end)
    plot.save(tmp_path)

    # for lack of a unyt.testing.assert_unit_array_equal function
    np.testing.assert_array_equal(start_before, start)
    assert start_before.units == start.units
    np.testing.assert_array_equal(end_before, end)
    assert end_before.units == end.units
Exemplo n.º 6
0
def test_exclude_nan():
    """exclude_nan must drop every NaN cell, keeping only the finite row."""
    arr = np.full((10, 10, 10), np.nan)
    arr[1, 1, :] = 1
    ds = load_uniform_grid(
        {"density": arr}, arr.shape, length_unit="cm", nprocs=1
    )
    filtered = ds.all_data().exclude_nan(("gas", "density"))
    assert_equal(filtered[("gas", "density")], np.ones(10))
Exemplo n.º 7
0
def fake_random_ds(
    ndims,
    peak_value=1.0,
    fields=("density", "velocity_x", "velocity_y", "velocity_z"),
    units=("g/cm**3", "cm/s", "cm/s", "cm/s"),
    particle_fields=None,
    particle_field_units=None,
    negative=False,
    nprocs=1,
    particles=0,
    length_unit=1.0,
    unit_system="cgs",
    bbox=None,
):
    """Build an in-memory uniform-grid dataset filled with reproducible
    random data, optionally with random particles.

    ``negative`` may be a single bool or a per-field sequence; fields
    flagged negative are shifted so their values straddle zero.
    """
    from yt.loaders import load_uniform_grid

    # Fixed seed so repeated calls produce identical datasets
    prng = RandomState(0x4D3D3D3)
    if is_sequence(ndims):
        assert len(ndims) == 3
    else:
        ndims = [ndims] * 3
    if not is_sequence(negative):
        negative = [negative] * len(fields)
    assert len(fields) == len(negative)
    offsets = [0.5 if neg else 0.0 for neg in negative]
    data = {}
    for field, offset, unit in zip(fields, offsets, units):
        values = (prng.random_sample(ndims) - offset) * peak_value
        if field[0] == "all":
            # ("all", ...) particle fields are flat arrays, not grids
            values = values.ravel()
        data[field] = (values, unit)
    if particles:
        if particle_fields is None:
            # Default particle set: positions, velocities, and masses
            for ax in "xyz":
                data["io", f"particle_position_{ax}"] = (
                    prng.random_sample(size=particles),
                    "code_length",
                )
            for ax in "xyz":
                data["io", f"particle_velocity_{ax}"] = (
                    prng.random_sample(size=particles) - 0.5,
                    "cm/s",
                )
            data["io", "particle_mass"] = (prng.random_sample(particles), "g")
        else:
            for field, unit in zip(particle_fields, particle_field_units):
                if field in ("particle_position", "particle_velocity"):
                    # Vector fields get an (N, 3) sample
                    sample = prng.random_sample((int(particles), 3))
                else:
                    sample = prng.random_sample(size=int(particles))
                data["io", field] = (sample, unit)
    return load_uniform_grid(
        data,
        ndims,
        length_unit=length_unit,
        nprocs=nprocs,
        unit_system=unit_system,
        bbox=bbox,
    )
Exemplo n.º 8
0
def test_magnetic_code_units():
    """The dataset's magnetic_unit must be expressed in the unit system's
    base field unit, and one code_magnetic must always equal it."""
    sqrt4pi = np.sqrt(4.0 * np.pi)
    ddims = (16,) * 3
    data = {"density": (np.random.uniform(size=ddims), "g/cm**3")}

    # (magnetic_unit, unit_system, expected value, expected unit symbol)
    cases = [
        ((sqrt4pi, "gauss"), "cgs", sqrt4pi, "G"),
        ((1.0, "T"), "cgs", 10000.0, "G"),
        ((1.0, "T"), "mks", 1.0, "T"),
        ((1.0, "gauss"), "mks", 1.0e-4, "T"),
    ]
    for mag_unit, system, expected_value, expected_symbol in cases:
        ds = load_uniform_grid(
            data, ddims, magnetic_unit=mag_unit, unit_system=system
        )

        assert_allclose(ds.magnetic_unit.value, expected_value)
        assert str(ds.magnetic_unit.units) == expected_symbol

        # By construction, one code_magnetic equals the magnetic unit
        mucu = ds.magnetic_unit.to("code_magnetic")
        assert_allclose(mucu.value, 1.0)
        assert str(mucu.units) == "code_magnetic"
Exemplo n.º 9
0
def test_equal():
    """exclude_equal/include_equal must split cells on exact value match."""
    arr = np.ones((10, 10, 10))
    arr[1, 1, :] = 2.0
    arr[2, 1, :] = 3.0
    ds = load_uniform_grid(
        dict(density=arr), arr.shape, length_unit="cm", nprocs=1
    )
    region = ds.all_data()

    without_ones = region.exclude_equal(("gas", "density"), 1.0)
    assert np.all(without_ones[("gas", "density")] != 1.0)

    with_ones = region.include_equal(("gas", "density"), 1.0)
    assert np.all(with_ones[("gas", "density")] == 1.0)
Exemplo n.º 10
0
def test_nan_data():
    """A field containing NaNs must still slice, plot, and save cleanly."""
    arr = np.random.random((16, 16, 16)) - 0.5
    arr[:9, :9, :9] = np.nan

    ds = load_uniform_grid({"density": arr}, [16, 16, 16])

    plot = SlicePlot(ds, "z", ("gas", "density"))

    # The suffix needs its leading dot: with suffix="png" the temp file is
    # "...png" without an extension, so plot.save appends ".png" and writes
    # a second file the context manager never deletes.
    with tempfile.NamedTemporaryFile(suffix=".png") as f:
        plot.save(f.name)
Exemplo n.º 11
0
def test_symlog_extremely_small_vals():
    # check that the plot can be constructed without crashing
    # see https://github.com/yt-project/yt/issues/3858
    dims = (64, 64, 1)
    field = np.full(dims, 5.0e-324)  # smallest positive denormal double
    field[0, 0] = -1e12
    field[1, 1] = 200

    ds = load_uniform_grid({"scalar": field}, dims)
    plot = SlicePlot(ds, "z", ("stream", "scalar"))
    plot["stream", "scalar"]  # force the image to be rendered
Exemplo n.º 12
0
def test_ppv():
    """Thermally broadened PPV cube: the mean spectrum must match the
    analytic Gaussian, both on the velocity axis and after transforming
    the spectral axis to energy."""

    np.random.seed(seed=0x4D3D3D3)

    dims = (8, 8, 128)
    v_shift = 1.0e7 * u.cm / u.s  # bulk line-of-sight velocity
    sigma_v = 2.0e7 * u.cm / u.s  # turbulent velocity dispersion
    T_0 = 1.0e8 * u.Kelvin        # uniform gas temperature
    data = {
        "density": (np.ones(dims), "g/cm**3"),
        "temperature": (T_0.v * np.ones(dims), "K"),
        "velocity_x": (np.zeros(dims), "cm/s"),
        "velocity_y": (np.zeros(dims), "cm/s"),
        # Only the z-component carries the shifted Gaussian distribution
        "velocity_z": (
            np.random.normal(loc=v_shift.v, scale=sigma_v.v, size=dims),
            "cm/s",
        ),
    }

    ds = load_uniform_grid(data, dims)

    # 1024 channels over +/-300 km/s with thermal broadening enabled
    cube = PPVCube(
        ds,
        "z",
        ("stream", "density"),
        (-300.0, 300.0, 1024, "km/s"),
        dims=8,
        thermal_broad=True,
    )

    dv = cube.dv  # channel width
    # Total line width: thermal term for particle mass 56*mh (presumably
    # an iron-like ion — confirm against the PPVCube defaults) combined
    # in quadrature with the turbulent dispersion.
    v_th = np.sqrt(2.0 * kboltz * T_0 / (56.0 * mh) +
                   2.0 * sigma_v**2).in_units("km/s")
    a = cube.data.mean(axis=(0, 1)).v
    # NOTE(review): the (cube.vmid + v_shift) sign suggests the spectral
    # axis is flipped relative to velocity_z — confirm.
    b = dv * np.exp(-((
        (cube.vmid + v_shift) / v_th)**2)) / (np.sqrt(np.pi) * v_th)

    assert_allclose_units(a, b, 1.0e-2)

    # Re-express the spectral axis as photon energy around a 6.8 keV line
    E_0 = 6.8 * u.keV

    cube.transform_spectral_axis(E_0.v, str(E_0.units))

    # Energy channels run opposite to velocity channels, hence the sign
    dE = -cube.dv
    delta_E = E_0 * v_th.in_cgs() / clight  # Doppler width in energy
    E_shift = E_0 * (1.0 + v_shift / clight)  # Doppler-shifted centroid

    c = (dE * np.exp(-(((cube.vmid - E_shift) / delta_E)**2)) /
         (np.sqrt(np.pi) * delta_E))

    assert_allclose_units(a, c, 1.0e-2)
Exemplo n.º 13
0
    def export_dataset(self, fields=None, nprocs=1):
        r"""Export a set of pixelized fields to an in-memory dataset that can be
        analyzed as any other in yt. Unit information and other parameters (e.g.,
        geometry, current_time, etc.) will be taken from the parent dataset.

        Parameters
        ----------
        fields : list of strings, optional
            These fields will be pixelized and output. If "None", the keys of the
            FRB will be used.
        nprocs: integer, optional
            If greater than 1, will create this number of subarrays out of data

        Returns
        -------
        A stream dataset of shape (nx, ny, 1) covering the FRB's bounds.

        Examples
        --------
        >>> import yt
        >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
        >>> slc = ds.slice(2, 0.0)
        >>> frb = slc.to_frb((500.0, "kpc"), 500)
        >>> ds2 = frb.export_dataset(
        ...     fields=[("gas", "density"), ("gas", "temperature")], nprocs=32
        ... )
        """
        # Pixel dimensions of this fixed-resolution buffer
        nx, ny = self.buff_size
        data = {}
        if fields is None:
            fields = list(self.keys())
        for field in fields:
            arr = self[field]
            # Transpose to (nx, ny) and add a trailing unit axis so the
            # 2D buffer becomes a one-cell-thick 3D grid
            data[field] = (arr.d.T.reshape(nx, ny, 1), str(arr.units))
        # self.bounds gives (xmin, xmax, ymin, ymax) in code_length
        bounds = [b.in_units("code_length").v for b in self.bounds]
        # Dummy [0, 1] extent for the flat third dimension
        bbox = np.array([[bounds[0], bounds[1]], [bounds[2], bounds[3]],
                         [0.0, 1.0]])
        # Carry the parent dataset's units, time, and geometry over
        return load_uniform_grid(
            data,
            [nx, ny, 1],
            length_unit=self.ds.length_unit,
            bbox=bbox,
            sim_time=self.ds.current_time.in_units("s").v,
            mass_unit=self.ds.mass_unit,
            time_unit=self.ds.time_unit,
            velocity_unit=self.ds.velocity_unit,
            magnetic_unit=self.ds.magnetic_unit,
            periodicity=(False, False, False),
            geometry=self.ds.geometry,
            nprocs=nprocs,
        )
Exemplo n.º 14
0
def test_non_square_frb():
    """Round-trip a non-square fixed-resolution buffer through
    save_as_dataset and check that shape and values survive reloading.

    The body runs inside a scratch directory; a try/finally guarantees the
    working directory is restored and the scratch area removed even when
    an assertion fails (the original leaked both on failure).
    """
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    try:
        # construct an arbitrary dataset
        arr = np.arange(8.0 * 9.0 * 10.0).reshape((8, 9, 10))
        data = dict(density=(arr, "g/cm**3"))
        bbox = np.array([[-4, 4.0], [-4.5, 4.5], [-5.0, 5]])
        ds = load_uniform_grid(data,
                               arr.shape,
                               length_unit="Mpc",
                               bbox=bbox,
                               periodicity=(False, False, False))

        # make a slice
        slc = ds.slice(axis="z", coord=ds.quan(0.0, "code_length"))
        # make a frb and save it to disk
        center = (ds.quan(0.0, "code_length"), ds.quan(0.0, "code_length"))
        xax = ds.coordinates.x_axis[slc.axis]
        yax = ds.coordinates.y_axis[slc.axis]
        res = [ds.domain_dimensions[xax], ds.domain_dimensions[yax]]  # = [8, 9]
        # width and height span the full domain: 8 and 9 code_length
        width = ds.domain_right_edge[xax] - ds.domain_left_edge[xax]
        height = ds.domain_right_edge[yax] - ds.domain_left_edge[yax]
        frb = slc.to_frb(width=width, height=height, resolution=res,
                         center=center)
        fname = "test_frb_roundtrip.h5"
        frb.save_as_dataset(fname, fields=[("gas", "density")])

        expected_vals = arr[:, :, 5].T
        print(
            "\nConfirmation that initial frb results are expected:",
            (expected_vals == frb[("gas", "density")].v).all(),
            "\n",
        )

        # yt-reload:
        reloaded_ds = load(fname)

        assert_array_equal(frb[("gas", "density")].shape,
                           reloaded_ds.data[("gas", "density")].shape)
        assert_array_equal(frb[("gas", "density")],
                           reloaded_ds.data[("gas", "density")])
    finally:
        # Restore cwd before deleting the directory we were standing in
        os.chdir(curdir)
        if tmpdir != ".":
            shutil.rmtree(tmpdir)
Exemplo n.º 15
0
def test_particles_outside_domain():
    """Particles placed outside the domain must be dropped at load time."""
    np.random.seed(0x4D3D3D3)
    # x positions extend below the domain edge at -1.5; y and z do not
    px = np.random.uniform(low=-1.6, high=1.5, size=1000)
    py = np.random.uniform(low=-1.5, high=1.5, size=1000)
    pz = np.random.uniform(low=-1.5, high=1.5, size=1000)
    rho = np.random.random((16, 16, 16))
    bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])
    ds = load_uniform_grid(
        {
            "density": rho,
            "particle_position_x": px,
            "particle_position_y": py,
            "particle_position_z": pz,
        },
        (16, 16, 16),
        bbox=bbox,
        nprocs=4,
    )
    # Every particle left of the domain should have been discarded
    outside = (px < bbox[0, 0]).nonzero()[0]
    assert outside.size == 1000 - ds.particle_type_counts["io"]
    region = ds.all_data()
    assert ds.particle_type_counts["io"] == region["particle_position_x"].size
Exemplo n.º 16
0
def test_symlog_min_zero():
    # see https://github.com/yt-project/yt/issues/3791
    dims = (32, 16, 1)
    # Density ramps linearly from 0 to 1 along the second axis
    ramp = np.linspace(0, 1, 16) * np.ones((32, 16))
    density = ramp.reshape(dims)

    ds = load_uniform_grid(
        {("gas", "density"): density},
        dims,
        bbox=np.array([[0.0, 5.0], [0, 1], [-0.1, +0.1]]),
    )

    plot = SlicePlot(ds, "z", "density")
    image = plot["gas", "density"].image.get_array()

    # check that no data value was mapped to a NaN (log(0))
    assert np.all(~np.isnan(image))
    # 0 should be mapped to itself since we expect a symlog norm
    assert np.min(image) == 0.0
Exemplo n.º 17
0
def test_inside_outside():
    """Exercise the include/exclude cut-region filters on a field holding
    the values 1 (980 cells), 2 (10 cells), and 3 (10 cells)."""
    test_array = np.ones((10, 10, 10))
    test_array[1, 1, :] = 2.0
    test_array[2, 1, :] = 3.0
    data = dict(density=test_array)
    ds = load_uniform_grid(data, test_array.shape, length_unit="cm", nprocs=1)
    ad = ds.all_data()

    only_ones_and_twos = ad.include_inside(("gas", "density"), 0.9, 2.1)
    assert np.all(only_ones_and_twos[("gas", "density")] != 3.0)
    assert len(only_ones_and_twos[("gas", "density")]) == 990

    # exclude_outside is the complement operation and must agree
    only_ones_and_twos = ad.exclude_outside(("gas", "density"), 0.9, 2.1)
    assert len(only_ones_and_twos[("gas", "density")]) == 990
    assert np.all(only_ones_and_twos[("gas", "density")] != 3.0)

    only_threes = ad.include_outside(("gas", "density"), 0.9, 2.1)
    assert np.all(only_threes[("gas", "density")] == 3)
    assert len(only_threes[("gas", "density")]) == 10

    # exclude_inside must match include_outside; the second stanza was
    # previously a verbatim repeat of include_outside, leaving
    # exclude_inside untested.
    only_threes = ad.exclude_inside(("gas", "density"), 0.9, 2.1)
    assert np.all(only_threes[("gas", "density")] == 3)
    assert len(only_threes[("gas", "density")]) == 10

    # Repeat, but convert units to g/m**3
    only_ones_and_twos = ad.include_inside(("gas", "density"), 0.9e6, 2.1e6, "g/m**3")
    assert np.all(only_ones_and_twos[("gas", "density")] != 3.0)
    assert len(only_ones_and_twos[("gas", "density")]) == 990

    only_ones_and_twos = ad.exclude_outside(("gas", "density"), 0.9e6, 2.1e6, "g/m**3")
    assert len(only_ones_and_twos[("gas", "density")]) == 990
    assert np.all(only_ones_and_twos[("gas", "density")] != 3.0)

    only_threes = ad.include_outside(("gas", "density"), 0.9e6, 2.1e6, "g/m**3")
    assert np.all(only_threes[("gas", "density")] == 3)
    assert len(only_threes[("gas", "density")]) == 10

    only_threes = ad.exclude_inside(("gas", "density"), 0.9e6, 2.1e6, "g/m**3")
    assert np.all(only_threes[("gas", "density")] == 3)
    assert len(only_threes[("gas", "density")]) == 10
def setup_cluster():
    """Build a stream dataset for an idealized galaxy cluster with a
    beta-model density profile, a radial temperature profile, and a
    z-velocity that scales with temperature."""

    # Profile parameters.  NOTE(review): length scales appear to be in kpc
    # and temperatures in keV (converted via K_per_keV below) — confirm.
    R = 1000.       # outer radius of the box half-width
    r_c = 100.      # beta-model core radius
    rho_c = 1.673e-26  # central density (g/cm**3)
    beta = 1.       # beta-model slope
    T0 = 4.         # central temperature scale
    nx, ny, nz = 16, 16, 16
    c = 0.17        # temperature-profile shape parameters
    a_c = 30.
    a = 200.
    v0 = 300. * cm_per_km  # velocity scale, converted km/s -> cm/s
    ddims = (nx, ny, nz)

    # Cell-centered coordinate grid spanning [-R, R] in each direction
    x, y, z = np.mgrid[-R:R:nx * 1j, -R:R:ny * 1j, -R:R:nz * 1j]

    r = np.sqrt(x**2 + y**2 + z**2)

    # Beta-model density: rho_c * (1 + (r/r_c)**2)**(-3*beta/2)
    dens = np.zeros(ddims)
    dens = rho_c * (1. + (r / r_c)**2)**(-1.5 * beta)
    temp = T0 * K_per_keV / (1. + r / a) * (c + r / a_c) / (1. + r / a_c)
    # z-velocity scales linearly with the local temperature
    velz = v0 * temp / (T0 * K_per_keV)

    data = {}
    data["density"] = (dens, "g/cm**3")
    data["temperature"] = (temp, "K")
    data["velocity_x"] = (np.zeros(ddims), "cm/s")
    data["velocity_y"] = (np.zeros(ddims), "cm/s")
    data["velocity_z"] = (velz, "cm/s")

    # Physical box size in cm, centered on the origin
    L = 2 * R * cm_per_kpc
    bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]]) * L

    ds = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
    # Force index creation so fields are ready for callers
    ds.index

    return ds
Exemplo n.º 19
0
def test_magnetic_fields():
    """Check magnetic derived-field units and the magnetic energy density
    across cgs, mks, and code unit systems, including explicit
    magnetic_unit overrides."""

    ddims = (16, 16, 16)
    data1 = {
        "magnetic_field_x": (np.random.random(size=ddims), "T"),
        "magnetic_field_y": (np.random.random(size=ddims), "T"),
        "magnetic_field_z": (np.random.random(size=ddims), "T"),
    }
    # Same field values expressed in gauss (1 T == 1.0e4 G)
    data2 = {}
    for field in data1:
        data2[field] = (data1[field][0] * 1.0e4, "gauss")

    ds0 = load_uniform_grid(data1, ddims, unit_system="cgs")
    # Same data but with an explicit 1 T magnetic unit
    ds1 = load_uniform_grid(data1,
                            ddims,
                            magnetic_unit=(1.0, "T"),
                            unit_system="cgs")
    ds2 = load_uniform_grid(data2, ddims, unit_system="mks")
    # For this test dataset, code units are cgs units
    ds3 = load_uniform_grid(data2, ddims, unit_system="code")
    # For this test dataset, code units are SI units
    ds4 = load_uniform_grid(data1,
                            ddims,
                            magnetic_unit=(1.0, "T"),
                            unit_system="code")

    # Force index creation so derived fields are registered
    ds0.index
    ds1.index
    ds2.index
    ds3.index
    ds4.index

    dd0 = ds0.all_data()
    dd1 = ds1.all_data()
    dd2 = ds2.all_data()
    dd3 = ds3.all_data()
    dd4 = ds4.all_data()

    # Derived fields report the base magnetic unit of each unit system
    assert ds0.fields.gas.magnetic_field_strength.units == "G"
    assert ds1.fields.gas.magnetic_field_strength.units == "G"
    assert ds1.fields.gas.magnetic_field_poloidal.units == "G"
    assert ds1.fields.gas.magnetic_field_toroidal.units == "G"
    assert ds2.fields.gas.magnetic_field_strength.units == "T"
    assert ds2.fields.gas.magnetic_field_poloidal.units == "T"
    assert ds2.fields.gas.magnetic_field_toroidal.units == "T"
    assert ds3.fields.gas.magnetic_field_strength.units == "code_magnetic"
    assert ds3.fields.gas.magnetic_field_poloidal.units == "code_magnetic"
    assert ds3.fields.gas.magnetic_field_toroidal.units == "code_magnetic"
    assert ds4.fields.gas.magnetic_field_strength.units == "code_magnetic"
    assert ds4.fields.gas.magnetic_field_poloidal.units == "code_magnetic"
    assert ds4.fields.gas.magnetic_field_toroidal.units == "code_magnetic"

    # Gaussian (cgs) magnetic energy density: B**2 / (8*pi)
    emag0 = (dd0[("gas", "magnetic_field_x")]**2 +
             dd0[("gas", "magnetic_field_y")]**2 +
             dd0[("gas", "magnetic_field_z")]**2) / (8.0 * np.pi)
    emag0.convert_to_units("dyne/cm**2")

    emag1 = (dd1[("gas", "magnetic_field_x")]**2 +
             dd1[("gas", "magnetic_field_y")]**2 +
             dd1[("gas", "magnetic_field_z")]**2) / (8.0 * np.pi)
    emag1.convert_to_units("dyne/cm**2")

    # SI magnetic energy density: B**2 / (2*mu_0)
    emag2 = (dd2[("gas", "magnetic_field_x")]**2 +
             dd2[("gas", "magnetic_field_y")]**2 +
             dd2[("gas", "magnetic_field_z")]**2) / (2.0 * mu_0)
    emag2.convert_to_units("Pa")

    # ds3's code units are cgs, so the Gaussian form applies
    emag3 = (dd3[("gas", "magnetic_field_x")]**2 +
             dd3[("gas", "magnetic_field_y")]**2 +
             dd3[("gas", "magnetic_field_z")]**2) / (8.0 * np.pi)
    emag3.convert_to_units("code_pressure")

    # ds4's code units are SI, so the mu_0 form applies
    emag4 = (dd4[("gas", "magnetic_field_x")]**2 +
             dd4[("gas", "magnetic_field_y")]**2 +
             dd4[("gas", "magnetic_field_z")]**2) / (2.0 * mu_0)
    emag4.convert_to_units("code_pressure")

    # note that "magnetic_energy_density" and "magnetic_pressure" are aliased

    assert_almost_equal(emag0, dd0[("gas", "magnetic_energy_density")])
    assert_almost_equal(emag1, dd1[("gas", "magnetic_energy_density")])
    assert_almost_equal(emag2, dd2[("gas", "magnetic_energy_density")])
    assert_almost_equal(emag3, dd3[("gas", "magnetic_energy_density")])
    assert_almost_equal(emag4, dd4[("gas", "magnetic_energy_density")])

    assert str(emag0.units) == str(dd0[("gas",
                                        "magnetic_energy_density")].units)
    assert str(emag1.units) == str(dd1[("gas",
                                        "magnetic_energy_density")].units)
    assert str(emag2.units) == str(dd2[("gas",
                                        "magnetic_energy_density")].units)
    assert str(emag3.units) == str(dd3[("gas",
                                        "magnetic_energy_density")].units)
    assert str(emag4.units) == str(dd4[("gas",
                                        "magnetic_energy_density")].units)

    # All five must agree once converted to a common (cgs) system
    assert_almost_equal(emag1.in_cgs(), emag0.in_cgs())
    assert_almost_equal(emag2.in_cgs(), emag0.in_cgs())
    assert_almost_equal(emag1.in_cgs(), emag2.in_cgs())
    assert_almost_equal(emag1.in_cgs(), emag3.in_cgs())
    assert_almost_equal(emag1.in_cgs(), emag4.in_cgs())
Exemplo n.º 20
0
def fake_vr_orientation_test_ds(N=96, scale=1):
    """
    create a toy dataset that puts a sphere at (0,0,0), a single cube
    on +x, two cubes on +y, and three cubes on +z in a domain from
    [-1*scale,1*scale]**3.  The lower planes
    (x = -1*scale, y = -1*scale, z = -1*scale) are also given non-zero
    values.

    This dataset allows you to easily explore orientations and
    handiness in VR and other renderings

    Parameters
    ----------

    N : integer
       The number of cells along each direction

    scale : float
       A spatial scale, the domain boundaries will be multiplied by scale to
       test datasets that have spatial different scales (e.g. data in CGS units)

    Returns
    -------
    A uniform-grid stream dataset with a single "density" field.
    """
    from yt.loaders import load_uniform_grid

    xmin = ymin = zmin = -1.0 * scale
    xmax = ymax = zmax = 1.0 * scale

    # cell width
    dcoord = (xmax - xmin) / N

    # background value everywhere the markers below don't overwrite
    arr = np.zeros((N, N, N), dtype=np.float64)
    arr[:, :, :] = 1.0e-4

    bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])

    # coordinates -- in the notation data[i, j, k]
    x = (np.arange(N) + 0.5) * dcoord + xmin
    y = (np.arange(N) + 0.5) * dcoord + ymin
    z = (np.arange(N) + 0.5) * dcoord + zmin

    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")

    # sphere at the origin
    c = np.array([0.5 * (xmin + xmax), 0.5 * (ymin + ymax), 0.5 * (zmin + zmax)])
    r = np.sqrt((x3d - c[0]) ** 2 + (y3d - c[1]) ** 2 + (z3d - c[2]) ** 2)
    arr[r < 0.05] = 1.0

    # mark the three lower boundary planes (two cells thick)
    arr[abs(x3d - xmin) < 2 * dcoord] = 0.3
    arr[abs(y3d - ymin) < 2 * dcoord] = 0.3
    arr[abs(z3d - zmin) < 2 * dcoord] = 0.3

    # single cube on +x
    xc = 0.75 * scale
    dx = 0.05 * scale
    idx = np.logical_and(
        np.logical_and(x3d > xc - dx, x3d < xc + dx),
        np.logical_and(
            np.logical_and(y3d > -dx, y3d < dx), np.logical_and(z3d > -dx, z3d < dx)
        ),
    )
    arr[idx] = 1.0

    # two cubes on +y
    dy = 0.05 * scale
    for yc in [0.65 * scale, 0.85 * scale]:
        idx = np.logical_and(
            np.logical_and(y3d > yc - dy, y3d < yc + dy),
            np.logical_and(
                np.logical_and(x3d > -dy, x3d < dy), np.logical_and(z3d > -dy, z3d < dy)
            ),
        )
        arr[idx] = 0.8

    # three cubes on +z
    dz = 0.05 * scale
    for zc in [0.5 * scale, 0.7 * scale, 0.9 * scale]:
        idx = np.logical_and(
            np.logical_and(z3d > zc - dz, z3d < zc + dz),
            np.logical_and(
                np.logical_and(x3d > -dz, x3d < dz), np.logical_and(y3d > -dz, y3d < dz)
            ),
        )
        arr[idx] = 0.6

    data = dict(density=(arr, "g/cm**3"))
    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
    return ds
Exemplo n.º 21
0
def test_clump_finding():
    """Find clumps in a field with two disconnected density peaks and
    verify clump counts, info items, and particle assignment."""
    n_c = 8  # cells per side
    n_p = 1  # number of particles
    dims = (n_c, n_c, n_c)

    density = np.ones(dims)
    high_rho = 10.0
    # add a couple disconnected density enhancements
    density[2, 2, 2] = high_rho
    density[6, 6, 6] = high_rho

    # put a particle at the center of one of them
    dx = 1.0 / n_c
    px = 2.5 * dx * np.ones(n_p)

    data = {
        "density": density,
        "particle_mass": np.ones(n_p),
        "particle_position_x": px,
        "particle_position_y": px,
        "particle_position_z": px,
    }

    ds = load_uniform_grid(data, dims)

    ad = ds.all_data()
    master_clump = Clump(ad, ("gas", "density"))
    master_clump.add_validator("min_cells", 1)

    def _total_volume(clump):
        # custom info item: total cell volume of the clump in cm**3
        total_vol = clump.data.quantities.total_quantity(["cell_volume"
                                                          ]).in_units("cm**3")
        return "Cell Volume: %6e cm**3.", total_vol

    add_clump_info("total_volume", _total_volume)
    master_clump.add_info_item("total_volume")

    # contour from 0.5 to 2*high_rho with a step factor of 10
    find_clumps(master_clump, 0.5, 2.0 * high_rho, 10.0)

    # there should be two children
    assert_equal(len(master_clump.children), 2)

    leaf_clumps = master_clump.leaves

    # every leaf carries the default info items plus our custom one
    for l in leaf_clumps:
        keys = l.info.keys()
        assert "total_cells" in keys
        assert "cell_mass" in keys
        assert "max_grid_level" in keys
        assert "total_volume" in keys

    # two leaf clumps
    assert_equal(len(leaf_clumps), 2)

    # check some clump fields: each child is a single peak cell, and only
    # one of them contains the particle
    assert_equal(master_clump.children[0]["density"][0].size, 1)
    assert_equal(master_clump.children[0]["density"][0], ad["density"].max())
    assert_equal(master_clump.children[0]["particle_mass"].size, 1)
    assert_array_equal(master_clump.children[0]["particle_mass"],
                       ad["particle_mass"])
    assert_equal(master_clump.children[1]["density"][0].size, 1)
    assert_equal(master_clump.children[1]["density"][0], ad["density"].max())
    assert_equal(master_clump.children[1]["particle_mass"].size, 0)

    # clean up global registry to avoid polluting other tests
    del clump_info_registry["total_volume"]
Exemplo n.º 22
0
    def create_dataset(self,
                       domain_dimensions,
                       box_size,
                       left_edge=None,
                       **kwargs):
        """
        Create an in-memory, uniformly gridded dataset in 3D using yt by
        placing the clusters into a box. When adding multiple clusters,
        per-volume quantities from each cluster such as density and
        pressure are added, whereas per-mass quantites such as temperature
        and velocity are mass-weighted.

        Parameters
        ----------
        domain_dimensions : 3-tuple of ints
            The number of cells on a side for the domain.
        box_size : float
            The size of the box in kpc.
        left_edge : array_like, optional
            The minimum coordinate of the box in all three dimensions,
            in kpc. Default: None, which means the left edge will
            be [0, 0, 0].
        """
        from yt.loaders import load_uniform_grid
        from scipy.interpolate import InterpolatedUnivariateSpline
        if left_edge is None:
            left_edge = np.zeros(3)
        left_edge = np.array(left_edge)
        bbox = [[left_edge[0], left_edge[0] + box_size],
                [left_edge[1], left_edge[1] + box_size],
                [left_edge[2], left_edge[2] + box_size]]
        # Cell-centered coordinate grid spanning the box
        x, y, z = np.mgrid[bbox[0][0]:bbox[0][1]:domain_dimensions[0] * 1j,
                           bbox[1][0]:bbox[1][1]:domain_dimensions[1] * 1j,
                           bbox[2][0]:bbox[2][1]:domain_dimensions[2] * 1j, ]
        # fields1: per-volume quantities, added directly.
        # NOTE: a missing comma previously fused "dark_matter_density" and
        # "stellar_density" into one bogus field name.
        fields1 = [
            "density", "pressure", "dark_matter_density",
            "stellar_density", "gravitational_potential"
        ]
        # fields2: per-mass quantities, mass-weighted via the density
        fields2 = ["temperature"]
        # fields3: bulk velocities, also mass-weighted
        fields3 = ["velocity_x", "velocity_y", "velocity_z"]
        units = {
            "density": "Msun/kpc**3",
            "pressure": "Msun/kpc/Myr**2",
            "dark_matter_density": "Msun/kpc**3",
            "stellar_density": "Msun/kpc**3",
            "temperature": "K",
            "gravitational_potential": "kpc**2/Myr**2",
            "velocity_x": "kpc/Myr",
            "velocity_y": "kpc/Myr",
            "velocity_z": "kpc/Myr",
            "magnetic_field_strength": "G"
        }
        fields = fields1 + fields2
        data = {}
        for i, profile in enumerate(self.profiles):
            p = ClusterModel.from_h5_file(profile)
            # Radius of every cell relative to this cluster's center
            xx = x - self.center.d[i][0]
            yy = y - self.center.d[i][1]
            zz = z - self.center.d[i][2]
            rr = np.sqrt(xx * xx + yy * yy + zz * zz)
            # Density spline used as the mass weight for fields2/fields3
            fd = InterpolatedUnivariateSpline(p["radius"].d, p["density"].d)
            for field in fields:
                if field not in p:
                    continue
                if field not in data:
                    data[field] = (np.zeros(domain_dimensions), units[field])
                f = InterpolatedUnivariateSpline(p["radius"].d, p[field].d)
                if field in fields1:
                    data[field][0] += f(rr)
                elif field in fields2:
                    data[field][0] += f(rr) * fd(rr)
            # Bulk velocity: each component uses its own index into the
            # cluster velocity vector (previously component [0] was used
            # for all three, and the arrays were never initialized).
            for j, field in enumerate(fields3):
                if field not in data:
                    data[field] = (np.zeros(domain_dimensions), units[field])
                data[field][0] += self.velocity.d[i][j] * fd(rr)
        if "density" in data:
            # Normalize the mass-weighted fields by the summed density
            for field in fields2 + fields3:
                if field in data:
                    data[field][0] /= data["density"][0]
        return load_uniform_grid(data,
                                 domain_dimensions,
                                 length_unit="kpc",
                                 bbox=bbox,
                                 mass_unit="Msun",
                                 time_unit="Myr",
                                 **kwargs)
Exemplo n.º 23
0
def _check_particle_field_info(ds, ptypes, check_gas=True):
    # Helper: every particle field of each ptype must be flagged as a
    # particle-sampled field; optionally confirm gas density is NOT.
    pfields = (
        "particle_position_x",
        "particle_position_y",
        "particle_position_z",
        "particle_mass",
    )
    for ptype in ptypes:
        for pfield in pfields:
            assert ds._get_field_info(ptype, pfield).sampling_type == "particle"
    if check_gas:
        assert ds._get_field_info("gas", "density").sampling_type != "particle"


def test_stream_particles():
    """Load particles into in-memory (stream) uniform-grid datasets and check
    that particle counts and field sampling types survive both a single-grid
    load and a domain-decomposed (nprocs=8) load, first with the default
    "io" particle type and then with multiple named types ("dm", "star").
    """
    num_particles = 100000
    domain_dims = (64, 64, 64)
    dens = np.random.random(domain_dims)
    x = np.random.uniform(size=num_particles)
    y = np.random.uniform(size=num_particles)
    z = np.random.uniform(size=num_particles)
    m = np.ones(num_particles)

    # Gas density plus a single, untyped ("io") particle population.
    fields1 = {
        "density": dens,
        "particle_position_x": x,
        "particle_position_y": y,
        "particle_position_z": z,
        "particle_mass": m,
    }

    fields2 = fields1.copy()

    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)

    # The total particle count must be preserved by the load, with or
    # without domain decomposition.
    number_of_particles1 = np.sum(
        [grid.NumberOfParticles for grid in ug1.index.grids])
    number_of_particles2 = np.sum(
        [grid.NumberOfParticles for grid in ug2.index.grids])

    assert_equal(number_of_particles1, num_particles)
    assert_equal(number_of_particles1, number_of_particles2)

    # Per-grid counts for both the "io" and the aggregate "all" ptype must
    # match the grid's own bookkeeping.
    for grid in ug2.index.grids:
        tot_parts = grid["io", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    # Check that the particle fields have been defined correctly.
    _check_particle_field_info(ug1, ("all", "io"))
    _check_particle_field_info(ug2, ("all", "io"))

    # Now perform similar checks, but with multiple particle types.

    num_dm_particles = 30000
    xd = np.random.uniform(size=num_dm_particles)
    yd = np.random.uniform(size=num_dm_particles)
    zd = np.random.uniform(size=num_dm_particles)
    md = np.ones(num_dm_particles)

    num_star_particles = 20000
    xs = np.random.uniform(size=num_star_particles)
    ys = np.random.uniform(size=num_star_particles)
    zs = np.random.uniform(size=num_star_particles)
    ms = 2.0 * np.ones(num_star_particles)

    dens = np.random.random(domain_dims)

    fields3 = {
        "density": dens,
        ("dm", "particle_position_x"): xd,
        ("dm", "particle_position_y"): yd,
        ("dm", "particle_position_z"): zd,
        ("dm", "particle_mass"): md,
        ("star", "particle_position_x"): xs,
        ("star", "particle_position_y"): ys,
        ("star", "particle_position_z"): zs,
        ("star", "particle_mass"): ms,
    }

    fields4 = fields3.copy()

    ug3 = load_uniform_grid(fields3, domain_dims, 1.0)
    ug4 = load_uniform_grid(fields4, domain_dims, 1.0, nprocs=8)

    # Total particle count again, now summed across both named ptypes.
    number_of_particles3 = np.sum(
        [grid.NumberOfParticles for grid in ug3.index.grids])
    number_of_particles4 = np.sum(
        [grid.NumberOfParticles for grid in ug4.index.grids])

    assert_equal(number_of_particles3, num_dm_particles + num_star_particles)
    assert_equal(number_of_particles3, number_of_particles4)

    # "all" must aggregate across the named particle types per grid.
    for grid in ug4.index.grids:
        tot_parts = grid["dm", "particle_position_x"].size
        tot_parts += grid["star", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    # Field definitions for the multi-ptype datasets; the original checks
    # asserted nothing about gas density here, so check_gas=False.
    _check_particle_field_info(ug3, ("dm", "star"), check_gas=False)
    _check_particle_field_info(ug4, ("dm", "star"), check_gas=False)
def test_particle_generator():
    """Exercise the three particle generators against a stream dataset:
    WithDensityParticleGenerator (density-weighted sampling),
    LatticeParticleGenerator (regular lattice positions), and
    FromListParticleGenerator (explicit arrays, including overwrite and a
    second particle type). Verifies per-grid particle counts, index
    uniqueness, and lattice placement.
    """
    # First generate our dataset
    domain_dims = (32, 32, 32)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4.0 * np.ones(domain_dims)
    fields = {
        "density": (dens, "code_mass/code_length**3"),
        "temperature": (temp, "K")
    }
    ds = load_uniform_grid(fields, domain_dims, 1.0)

    # Now generate particles from density

    field_list = [
        ("io", "particle_position_x"),
        ("io", "particle_position_y"),
        ("io", "particle_position_z"),
        ("io", "particle_index"),
        ("io", "particle_gas_density"),
    ]
    num_particles = 10000
    # Map the grid's gas density onto each particle as a deposited field.
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles,
                                              field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)

    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
    particles_per_grid1 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid1, particles1.NumberOfParticles)

    # Every particle must have received a unique index.
    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert np.unique(tags).size == num_particles

    del tags

    # Set up a lattice of particles
    pdims = np.array([32, 32, 32])

    def new_indices():
        # We just add new indices onto the existing ones.
        # NOTE: np.prod, not the deprecated np.product (removed in NumPy 2.0).
        return np.arange(np.prod(pdims)) + num_particles

    le = np.array([0.25, 0.25, 0.25])
    re = np.array([0.75, 0.75, 0.75])

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(field_dict)

    # Test lattice positions: the unique coordinate values along each axis
    # must be an evenly spaced grid spanning [le, re] inclusive.
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0], re[0], num=pdims[0], endpoint=True)
    ypred = np.linspace(le[1], re[1], num=pdims[1], endpoint=True)
    zpred = np.linspace(le[2], re[2], num=pdims[2], endpoint=True)

    assert_almost_equal(xpos, xpred)
    assert_almost_equal(ypos, ypred)
    assert_almost_equal(zpos, zpred)

    del xpos, ypos, zpos
    del xpred, ypred, zpred

    # Test the number of particles again: counts should now reflect both
    # generators combined.
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid2,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)

    # Clear cached field data to force a fresh read from the stream.
    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid2,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)

    # Test the uniqueness of tags across both generators.
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    assert_equal(tags, np.arange(np.prod(pdims) + num_particles))

    del tags

    # Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in field_list:
        pdata[field] = dd[field]

    # Test the "from-list" generator and particle field overwrite:
    # overwriting must leave the total count unchanged.
    num_particles3 = num_particles + np.prod(pdims)
    particles3 = FromListParticleGenerator(ds, num_particles3, pdata)
    particles3.apply_to_stream(overwrite=True)

    # Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid3,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)
    particles_per_grid2 = [
        len(grid["particle_position_z"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid3,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)
    assert_equal(particles_per_grid2, particles_per_grid3)

    # Test adding in particles with a different particle type

    num_star_particles = 20000
    pdata2 = {
        ("star", "particle_position_x"):
        np.random.uniform(size=num_star_particles),
        ("star", "particle_position_y"):
        np.random.uniform(size=num_star_particles),
        ("star", "particle_position_z"):
        np.random.uniform(size=num_star_particles),
    }

    particles4 = FromListParticleGenerator(ds,
                                           num_star_particles,
                                           pdata2,
                                           ptype="star")
    particles4.apply_to_stream()

    # The new ptype must not disturb the existing "io" particles, and
    # "all" must aggregate both populations.
    dd = ds.all_data()
    assert dd["star", "particle_position_x"].size == num_star_particles
    assert dd["io", "particle_position_x"].size == num_particles3
    assert dd[
        "all",
        "particle_position_x"].size == num_star_particles + num_particles3

    del pdata
    del pdata2
    del ds
    del particles1
    del particles2
    del particles3
    del particles4
    del fields
    del dens
    del temp