Example #1
0
def test_load_particles_types():
    """Check that single-type and multi-type in-memory particle data expose
    the expected particle types and per-type field sizes."""
    n_io = 10000

    single_type_data = {
        f"particle_position_{ax}": np.random.random(size=n_io) for ax in "xyz"
    }
    single_type_data["particle_mass"] = np.ones(n_io)

    ds1 = load_particles(single_type_data)
    ds1.index  # force index construction

    assert set(ds1.particle_types) == {"all", "io", "nbody"}

    dd = ds1.all_data()

    # Every alias type must see the full particle count.
    for ptype in ("io", "all", "nbody"):
        for ax in "xyz":
            assert dd[ptype, f"particle_position_{ax}"].size == n_io

    n_dm = 10000
    n_star = 50000
    n_total = n_dm + n_star

    multi_type_data = {}
    for ptype, count in (("dm", n_dm), ("star", n_star)):
        for ax in "xyz":
            multi_type_data[ptype, f"particle_position_{ax}"] = np.random.random(
                size=count
            )
    multi_type_data["dm", "particle_mass"] = np.ones(n_dm)
    multi_type_data["star", "particle_mass"] = 2.0 * np.ones(n_star)

    ds2 = load_particles(multi_type_data)
    ds2.index

    assert set(ds2.particle_types) == {"all", "star", "dm", "nbody"}

    dd = ds2.all_data()

    # The raw types partition the particles; "all" unions them.
    for ax in "xyz":
        field = f"particle_position_{ax}"
        counted = sum(dd[ptype, field].size for ptype in ds2.particle_types_raw)
        assert counted == n_total
        assert dd["all", field].size == n_total
Example #2
0
def test_load_particles_with_data_source():
    """Verify that metadata (bounding box, time, code units) is inherited
    from the source dataset when ``data_source`` is passed to
    ``load_particles``."""
    ds1 = fake_particle_ds()

    # Round-trip the particle fields through a fresh dataset.
    ad = ds1.all_data()
    field_names = ["particle_mass"] + [f"particle_position_{ax}" for ax in "xyz"]
    ds2 = load_particles({name: ad[name] for name in field_names}, data_source=ad)

    def in_cgs(quan):
        return quan.in_cgs().v

    # Test bbox is parsed correctly
    for edge in ("domain_left_edge", "domain_right_edge"):
        assert np.allclose(in_cgs(getattr(ds1, edge)), in_cgs(getattr(ds2, edge)))

    # Test sim_time is parsed correctly
    assert in_cgs(ds1.current_time) == in_cgs(ds2.current_time)

    # Test code units are parsed correctly
    def get_cu(ds, dim):
        return ds.quan(1, "code_" + dim)

    for dim in ("length", "mass", "time", "velocity", "magnetic"):
        assert in_cgs(get_cu(ds1, dim)) == in_cgs(get_cu(ds2, dim))
Example #3
0
def fake_sph_orientation_ds():
    """Returns an in-memory SPH dataset useful for testing

    This dataset should have one particle at the origin, one more particle
    along the x axis, two along y, and three along z. All particles will
    have non-overlapping smoothing regions with a radius of 0.25, masses of 1,
    and densities of 1, and zero velocity.
    """
    from yt import load_particles

    npart = 7

    # Explicit coordinates: origin, (1,0,0), (0,1,0), (0,2,0),
    # (0,0,1), (0,0,2), (0,0,3).
    xs = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    ys = np.array([0.0, 0.0, 1.0, 2.0, 0.0, 0.0, 0.0])
    zs = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0])

    data = {
        "particle_position_x": (xs, "cm"),
        "particle_position_y": (ys, "cm"),
        "particle_position_z": (zs, "cm"),
        "particle_mass": (np.ones(npart), "g"),
        "particle_velocity_x": (np.zeros(npart), "cm/s"),
        "particle_velocity_y": (np.zeros(npart), "cm/s"),
        "particle_velocity_z": (np.zeros(npart), "cm/s"),
        "smoothing_length": (np.full(npart, 0.25), "cm"),
        "density": (np.ones(npart), "g/cm**3"),
        "temperature": (np.ones(npart), "K"),
    }

    # Domain comfortably encloses all smoothing regions.
    bbox = np.array([[-4, 4]] * 3)

    return load_particles(data=data, length_unit=1.0, bbox=bbox)
Example #4
0
def test_arbitrary_grid():
    """Deposit a single particle onto arbitrary grids and check mass
    conservation, then compare arbitrary grids against covering grids on
    unigrid data."""
    for ncells in [32, 64]:
        for px in [0.125, 0.25, 0.55519]:

            particle_data = {
                "particle_position_x": np.array([px]),
                "particle_position_y": np.array([0.5]),
                "particle_position_z": np.array([0.5]),
                "particle_mass": np.array([1.0]),
            }

            ds = load_particles(particle_data)

            for dims in ([ncells] * 3, [ncells, ncells / 2, ncells / 4]):
                LE = np.array([0.05, 0.05, 0.05])
                RE = np.array([0.95, 0.95, 0.95])
                dims = np.array(dims)

                dds = (RE - LE) / dims
                # BUG FIX: np.product was deprecated and removed in
                # NumPy 2.0; np.prod is the supported spelling.
                volume = ds.quan(np.prod(dds), "cm**3")

                # The particle lies inside [LE, RE], so integrating the
                # deposited density must recover its full mass.
                obj = ds.arbitrary_grid(LE, RE, dims)
                deposited_mass = obj["deposit", "all_density"].sum() * volume

                assert_equal(deposited_mass, ds.quan(1.0, "g"))

                # A region that excludes the particle deposits nothing.
                LE = np.array([0.00, 0.00, 0.00])
                RE = np.array([0.05, 0.05, 0.05])

                obj = ds.arbitrary_grid(LE, RE, dims)

                deposited_mass = obj["deposit", "all_density"].sum()

                assert_equal(deposited_mass, 0)

    # Test that we get identical results to the covering grid for unigrid data.
    # Testing AMR data is much harder.
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(32, nprocs=nprocs)
        for ref_level in [0, 1, 2]:
            cg = ds.covering_grid(
                ref_level, [0.0, 0.0, 0.0], 2 ** ref_level * ds.domain_dimensions
            )
            ag = ds.arbitrary_grid(
                [0.0, 0.0, 0.0], [1.0, 1.0, 1.0], 2 ** ref_level * ds.domain_dimensions
            )
            assert_almost_equal(cg["density"], ag["density"])
Example #5
0
def fake_particle_ds(
    fields=None,
    units=None,
    negative=None,
    npart=16 ** 3,
    length_unit=1.0,
    data=None,
):
    """Build an in-memory particle dataset with randomly generated fields.

    Parameters
    ----------
    fields, units, negative : optional sequences; ``None`` entries are
        resolved against the module-level defaults by
        ``_check_field_unit_args_helper``.
    npart : int
        Number of particles per field.
    length_unit : float
        Length unit forwarded to ``load_particles``.
    data : dict, optional
        Pre-made field arrays; fields present here are used as-is and
        not regenerated.
    """
    from yt.loaders import load_particles

    prng = RandomState(0x4D3D3D3)
    if negative is not None and not is_sequence(negative):
        negative = [negative for f in fields]

    fields, units, negative = _check_field_unit_args_helper(
        {
            "fields": fields,
            "units": units,
            "negative": negative,
        },
        {
            "fields": _fake_particle_ds_default_fields,
            "units": _fake_particle_ds_default_units,
            "negative": _fake_particle_ds_default_negative,
        },
    )

    # A "negative" field is recentered around zero via a 0.5 offset.
    offsets = [0.5 if n else 0.0 for n in negative]
    data = data if data else {}
    for field, offset, u in zip(fields, offsets, units):
        if field in data:
            # Caller supplied this field explicitly; keep it untouched.
            continue
        if "position" in field:
            # Positions cluster near the domain center, clipped in place
            # to stay inside [0, 1].
            v = prng.normal(loc=0.5, scale=0.25, size=npart)
            np.clip(v, 0.0, 1.0, v)
        else:
            # BUG FIX: this branch previously ran unconditionally,
            # discarding the clipped normal samples for position fields.
            v = prng.random_sample(npart) - offset
        data[field] = (v, u)
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    # BUG FIX: pass the length_unit parameter through instead of
    # hard-coding 1.0 (backward compatible: 1.0 is the default).
    ds = load_particles(data, length_unit, bbox=bbox)
    return ds
Example #6
0
def fake_sph_grid_ds(hsml_factor=1.0):
    """Returns an in-memory SPH dataset useful for testing

    This dataset should have 27 particles with the particles arranged uniformly
    on a 3D grid. The bottom left corner is (0.5,0.5,0.5) and the top right
    corner is (2.5,2.5,2.5). All particles will have non-overlapping smoothing
    regions with a radius of 0.05, masses of 1, and densities of 1, and zero
    velocity.
    """
    from yt import load_particles

    npart = 27

    # Cell-centered coordinates of a 3x3x3 lattice (0.5, 1.5, 2.5 along
    # each axis); C-order ravel reproduces a nested i, j, k loop with the
    # last index varying fastest.
    ii, jj, kk = np.mgrid[0:3, 0:3, 0:3]
    x = ii.ravel() + 0.5
    y = jj.ravel() + 0.5
    z = kk.ravel() + 0.5

    data = {
        "particle_position_x": (x, "cm"),
        "particle_position_y": (y, "cm"),
        "particle_position_z": (z, "cm"),
        "particle_mass": (np.ones(npart), "g"),
        "particle_velocity_x": (np.zeros(npart), "cm/s"),
        "particle_velocity_y": (np.zeros(npart), "cm/s"),
        "particle_velocity_z": (np.zeros(npart), "cm/s"),
        "smoothing_length": (0.05 * np.ones(npart) * hsml_factor, "cm"),
        "density": (np.ones(npart), "g/cm**3"),
        "temperature": (np.ones(npart), "K"),
    }

    bbox = np.array([[0, 3]] * 3)

    return load_particles(data=data, length_unit=1.0, bbox=bbox)
Example #7
0
def fake_particle_ds(
    fields=(
        "particle_position_x",
        "particle_position_y",
        "particle_position_z",
        "particle_mass",
        "particle_velocity_x",
        "particle_velocity_y",
        "particle_velocity_z",
    ),
    units=("cm", "cm", "cm", "g", "cm/s", "cm/s", "cm/s"),
    negative=(False, False, False, False, True, True, True),
    npart=16**3,
    length_unit=1.0,
    data=None,
):
    """Build an in-memory particle dataset with randomly generated fields.

    Parameters
    ----------
    fields, units : parallel sequences of field names and their units.
    negative : bool or sequence of bool
        Whether each field's samples are recentered around zero; a scalar
        is broadcast to all fields.
    npart : int
        Number of particles per field.
    length_unit : float
        Length unit forwarded to ``load_particles``.
    data : dict, optional
        Pre-made field arrays; fields present here are used as-is and
        not regenerated.
    """
    from yt.loaders import load_particles

    prng = RandomState(0x4D3D3D3)
    if not iterable(negative):
        negative = [negative for f in fields]
    assert len(fields) == len(negative)
    # A "negative" field is recentered around zero via a 0.5 offset.
    offsets = [0.5 if n else 0.0 for n in negative]
    data = data if data else {}
    for field, offset, u in zip(fields, offsets, units):
        if field in data:
            # Caller supplied this field explicitly; keep it untouched.
            continue
        if "position" in field:
            # Positions cluster near the domain center, clipped in place
            # to stay inside [0, 1].
            v = prng.normal(loc=0.5, scale=0.25, size=npart)
            np.clip(v, 0.0, 1.0, v)
        else:
            # BUG FIX: this branch previously ran unconditionally,
            # discarding the clipped normal samples for position fields.
            v = prng.random_sample(npart) - offset
        data[field] = (v, u)
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    # BUG FIX: pass the length_unit parameter through instead of
    # hard-coding 1.0 (backward compatible: 1.0 is the default).
    ds = load_particles(data, length_unit, bbox=bbox)
    return ds
def _parse_old_halo_list(data_ds, halo_list):
    r"""
    Convert the halo list into a loaded dataset.

    Each halo becomes one particle carrying its id, virial mass, virial
    radius, and center of mass (all in CGS). Cosmology attributes are
    copied over from ``data_ds``. Returns None for an empty halo list.
    When ``halo_list.save_particles`` is truthy, member-particle indices
    are attached as ``particle_ds.particles``.
    """

    num_halos = len(halo_list)

    # Nothing to convert; callers must handle the None return.
    if num_halos == 0: return None

    # Set up fields that we want to pull from identified halos and their units
    new_fields = [
        'particle_identifier', 'particle_mass', 'particle_position_x',
        'particle_position_y', 'particle_position_z', 'virial_radius'
    ]
    new_units = ['', 'g', 'cm', 'cm', 'cm', 'cm']

    # Set up a dictionary based on those fields
    # with empty arrays where we will fill in their values.
    # Each value is an (array, unit) tuple; the array ([0] below) is
    # filled in place during the loop.
    halo_properties = { f : (np.zeros(num_halos),unit) \
        for f, unit in zip(new_fields,new_units)}

    save_particles = getattr(halo_list, "save_particles", False)
    if save_particles:
        # Per-halo member counts, used later to slice the flat id array.
        n_particles = np.zeros(num_halos, dtype=np.int32)

    # Iterate through the halos pulling out their positions and virial quantities
    # and filling in the properties dictionary
    for i, halo in enumerate(halo_list):
        halo_properties['particle_identifier'][0][i] = halo.id
        halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
        halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()

        if save_particles:
            n_particles[i] = halo.indices.size

        com = halo.center_of_mass().in_cgs()
        halo_properties['particle_position_x'][0][i] = com[0]
        halo_properties['particle_position_y'][0][i] = com[1]
        halo_properties['particle_position_z'][0][i] = com[2]

    if save_particles:
        # Flatten all halos' member indices into one contiguous int64
        # array, written directly into the preallocated buffer.
        member_ids = np.empty(n_particles.sum(), dtype=np.int64)
        np.concatenate(
            [halo['particle_index'].astype(np.int64) for halo in halo_list],
            out=member_ids)

    # Define a bounding box based on original data ds
    bbox = np.array([
        data_ds.domain_left_edge.in_cgs(),
        data_ds.domain_right_edge.in_cgs()
    ]).T

    # Create a ds with the halos as particles
    particle_ds = load_particles(halo_properties,
                                 bbox=bbox,
                                 length_unit=1,
                                 mass_unit=1)

    # Create the field info dictionary so we can reference those fields
    particle_ds.create_field_info()

    # Propagate cosmology/time metadata from the source dataset.
    for attr in [
            "current_redshift", "current_time", "domain_dimensions",
            "cosmological_simulation", "omega_lambda", "omega_matter",
            "hubble_constant"
    ]:
        attr_val = getattr(data_ds, attr)
        setattr(particle_ds, attr, attr_val)
    particle_ds.current_time = particle_ds.current_time.in_cgs()

    particle_ds.unit_registry.modify("h", particle_ds.hubble_constant)
    # Comoving lengths: register "Xcm" units scaled by 1/(1+z).
    # NOTE(review): ``length`` here is presumably the length dimension
    # object imported at module scope (e.g. from unyt.dimensions); it is
    # not visible in this chunk — confirm before relying on it.
    for my_unit in ["m", "pc", "AU"]:
        new_unit = "%scm" % my_unit
        particle_ds.unit_registry.add(
            new_unit, particle_ds.unit_registry.lut[my_unit][0] /
            (1 + particle_ds.current_redshift), length,
            "\\rm{%s}/(1+z)" % my_unit)

    if save_particles:
        # Exclusive prefix sum: starting offset of each halo's members
        # within the flat member_ids array.
        start = n_particles.cumsum() - n_particles
        particle_ds.particles = {
            'ids': member_ids,
            'particle_number': n_particles,
            'particle_index_start': start
        }

    return particle_ds