Example #1
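The two AMRKDTree coverage tests below come from yt's test suite and assume a module-level preamble roughly like the following (module paths as in yt 3.x; exact locations may vary between releases, and the older second variant additionally relies on free functions such as depth_traverse, get_left_edge, and get_right_edge that earlier yt releases exposed from their kd-tree utility module):

import numpy as np

import yt.utilities.flagging_methods as fm
import yt.utilities.initial_conditions as ic
from yt.frontends.stream.api import load_uniform_grid, refine_amr
from yt.testing import assert_equal
from yt.utilities.amr_kdtree.api import AMRKDTree
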
def test_amr_kdtree_coverage():
    return  # TESTDISABLED
    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [
        ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75], {"density": (0.25, 100.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"density": data}, domain_dims, 1.0)
    ds = refine_amr(ug, rc, fo, 5)

    kd = AMRKDTree(ds)

    volume = kd.count_volume()
    yield assert_equal, volume, \
        np.prod(ds.domain_right_edge - ds.domain_left_edge)

    cells = kd.count_cells()
    true_cells = ds.all_data().quantities['TotalQuantity']('Ones')[0]
    yield assert_equal, cells, true_cells

    # This largely reproduces the AMRKDTree.tree.check_tree() functionality
    tree_ok = True
    for node in kd.tree.trunk.depth_traverse():
        if node.grid is None:
            continue
        grid = ds.index.grids[node.grid - kd._id_offset]
        dds = grid.dds
        gle = grid.LeftEdge
        nle = node.get_left_edge()
        nre = node.get_right_edge()
        li = np.rint((nle - gle) / dds).astype('int32')
        ri = np.rint((nre - gle) / dds).astype('int32')
        dims = (ri - li).astype('int32')
        tree_ok *= np.all(grid.LeftEdge <= nle)
        tree_ok *= np.all(grid.RightEdge >= nre)
        tree_ok *= np.all(dims > 0)

    yield assert_equal, True, tree_ok
Example #2
def test_amr_kdtree_coverage():
    return  # TESTDISABLED
    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"density": data}, domain_dims, 1.0)
    ds = refine_amr(ug, rc, fo, 5)

    kd = AMRKDTree(ds)

    volume = kd.count_volume()
    yield assert_equal, volume, \
        np.prod(ds.domain_right_edge - ds.domain_left_edge)

    cells = kd.count_cells()
    true_cells = ds.all_data().quantities['TotalQuantity']('Ones')[0]
    yield assert_equal, cells, true_cells

    # This largely reproduces the AMRKDTree.tree.check_tree() functionality
    tree_ok = True
    for node in depth_traverse(kd.tree.trunk):
        if node.grid is None:
            continue
        grid = ds.index.grids[node.grid - kd._id_offset]
        dds = grid.dds
        gle = grid.LeftEdge
        nle = get_left_edge(node)
        nre = get_right_edge(node)
        li = np.rint((nle - gle) / dds).astype('int32')
        ri = np.rint((nre - gle) / dds).astype('int32')
        dims = (ri - li).astype('int32')
        tree_ok *= np.all(grid.LeftEdge <= nle)
        tree_ok *= np.all(grid.RightEdge >= nre)
        tree_ok *= np.all(dims > 0)

    yield assert_equal, True, tree_ok
Example #3
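The particle-generator tests below assume a similar preamble; a plausible sketch for yt 3.x (given as an assumption, not a verbatim copy of the original test module) is:

import numpy as np

import yt.utilities.flagging_methods as fm
import yt.utilities.initial_conditions as ic
from yt.frontends.stream.api import load_uniform_grid, refine_amr
from yt.testing import assert_almost_equal, assert_equal
from yt.units.yt_array import uconcatenate
from yt.utilities.particle_generator import (
    FromListParticleGenerator,
    LatticeParticleGenerator,
    WithDensityParticleGenerator,
)
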
def test_particle_generator():
    # First generate our dataset
    domain_dims = (128, 128, 128)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4. * np.ones(domain_dims)
    fields = {
        "density": (dens, 'code_mass/code_length**3'),
        "temperature": (temp, 'K')
    }
    ug = load_uniform_grid(fields, domain_dims, 1.0)
    fo = [
        ic.BetaModelSphere(1.0, 0.1, 0.5, [0.5, 0.5, 0.5], {"density": (10.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](4.0)]
    ds = refine_amr(ug, rc, fo, 3)

    # Now generate particles from density

    field_list = [("io", "particle_position_x"), ("io", "particle_position_y"),
                  ("io", "particle_position_z"), ("io", "particle_index"),
                  ("io", "particle_gas_density")]
    num_particles = 1000000
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles,
                                              field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)

    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
    particles_per_grid1 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles

    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert np.unique(tags).size == num_particles
    # Set up a lattice of particles
    pdims = np.array([64, 64, 64])

    def new_indices():
        # We just add new indices onto the existing ones
        return np.arange((np.product(pdims))) + num_particles

    le = np.array([0.25, 0.25, 0.25])
    re = np.array([0.75, 0.75, 0.75])
    new_field_list = field_list + [("io", "particle_gas_temperature")]
    new_field_dict = {
        ("gas", "density"): ("io", "particle_gas_density"),
        ("gas", "temperature"): ("io", "particle_gas_temperature")
    }

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, new_field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(new_field_dict)

    # Test lattice positions
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0], re[0], num=pdims[0], endpoint=True)
    ypred = np.linspace(le[1], re[1], num=pdims[1], endpoint=True)
    zpred = np.linspace(le[2], re[2], num=pdims[2], endpoint=True)

    assert_almost_equal(xpos, xpred)
    assert_almost_equal(ypos, ypred)
    assert_almost_equal(zpos, zpred)

    # Test the number of particles again
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles + particles2.NumberOfParticles

    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles + particles2.NumberOfParticles

    # Test the uniqueness of tags
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    yield assert_equal, tags, np.arange((np.product(pdims) + num_particles))

    # Test that the old particles have zero for the new field
    old_particle_temps = [
        grid["particle_gas_temperature"][:particles_per_grid1[i]]
        for i, grid in enumerate(ds.index.grids)
    ]
    test_zeros = [
        np.zeros((particles_per_grid1[i]))
        for i, grid in enumerate(ds.index.grids)
    ]
    yield assert_equal, old_particle_temps, test_zeros

    # Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in new_field_list:
        pdata[field] = dd[field]

    #Test the "from-list" generator and particle field clobber
    particles3 = FromListParticleGenerator(ds,
                                           num_particles + np.product(pdims),
                                           pdata)
    particles3.apply_to_stream(clobber=True)

    # Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles + particles2.NumberOfParticles
    particles_per_grid2 = [
        len(grid["particle_position_z"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles + particles2.NumberOfParticles
Example #4
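The stream-particle tests in this and the later stream examples build uniform-grid and AMR stream datasets in memory; a plausible preamble (again yt 3.x paths, offered as an assumption) is:

import numpy as np

import yt.utilities.flagging_methods as fm
import yt.utilities.initial_conditions as ic
from yt.frontends.stream.api import load_amr_grids, load_uniform_grid, refine_amr
from yt.testing import assert_equal
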
def test_stream_particles():
    num_particles = 100000
    domain_dims = (64, 64, 64)
    dens = np.random.random(domain_dims)
    x = np.random.uniform(size=num_particles)
    y = np.random.uniform(size=num_particles)
    z = np.random.uniform(size=num_particles)
    m = np.ones(num_particles)

    # Field operators and cell flagging methods

    fo = []
    fo.append(ic.TopHatSphere(0.1, [0.2, 0.3, 0.4], {"density": 2.0}))
    fo.append(ic.TopHatSphere(0.05, [0.7, 0.4, 0.75], {"density": 20.0}))
    rc = [fm.flagging_method_registry["overdensity"](1.0)]

    # Check that all of this runs ok without particles

    ug0 = load_uniform_grid({"density": dens}, domain_dims, 1.0, nprocs=8)
    amr0 = refine_amr(ug0, rc, fo, 3)

    grid_data = []

    for grid in amr0.index.grids:

        data = dict(
            left_edge=grid.LeftEdge,
            right_edge=grid.RightEdge,
            level=grid.Level,
            dimensions=grid.ActiveDimensions,
        )

        for field in amr0.field_list:
            data[field] = grid[field]
        grid_data.append(data)

    amr0 = load_amr_grids(grid_data, domain_dims)

    # Now add particles

    fields1 = {
        "density": dens,
        "particle_position_x": x,
        "particle_position_y": y,
        "particle_position_z": z,
        "particle_mass": m,
    }

    fields2 = fields1.copy()

    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)

    # Check to make sure the number of particles is the same

    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.index.grids])
    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.index.grids])

    assert_equal(number_of_particles1, num_particles)
    assert_equal(number_of_particles1, number_of_particles2)

    for grid in ug2.index.grids:
        tot_parts = grid["io", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    # Check to make sure the fields have been defined correctly

    for ptype in ("all", "io"):
        assert (
            ug1._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            ug1._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            ug1._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert ug1._get_field_info(ptype, "particle_mass").sampling_type == "particle"
    assert not ug1._get_field_info("gas", "density").sampling_type == "particle"

    for ptype in ("all", "io"):
        assert (
            ug2._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            ug2._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            ug2._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert ug2._get_field_info(ptype, "particle_mass").sampling_type == "particle"
    assert not ug2._get_field_info("gas", "density").sampling_type == "particle"

    # Now refine this

    amr1 = refine_amr(ug1, rc, fo, 3)
    for field in sorted(ug1.field_list):
        assert field in amr1.field_list

    grid_data = []

    for grid in amr1.index.grids:

        data = dict(
            left_edge=grid.LeftEdge,
            right_edge=grid.RightEdge,
            level=grid.Level,
            dimensions=grid.ActiveDimensions,
        )

        for field in amr1.field_list:
            if field[0] not in ("all", "nbody"):
                data[field] = grid[field]

        grid_data.append(data)

    amr2 = load_amr_grids(grid_data, domain_dims)

    # Check everything again

    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.index.grids]
    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.index.grids]

    assert_equal(np.sum(number_of_particles1), num_particles)
    assert_equal(number_of_particles1, number_of_particles2)

    for grid in amr1.index.grids:
        tot_parts = grid["io", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    for grid in amr2.index.grids:
        tot_parts = grid["io", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    assert (
        amr1._get_field_info("all", "particle_position_x").sampling_type == "particle"
    )
    assert (
        amr1._get_field_info("all", "particle_position_y").sampling_type == "particle"
    )
    assert (
        amr1._get_field_info("all", "particle_position_z").sampling_type == "particle"
    )
    assert amr1._get_field_info("all", "particle_mass").sampling_type == "particle"
    assert not amr1._get_field_info("gas", "density").sampling_type == "particle"

    assert (
        amr2._get_field_info("all", "particle_position_x").sampling_type == "particle"
    )
    assert (
        amr2._get_field_info("all", "particle_position_y").sampling_type == "particle"
    )
    assert (
        amr2._get_field_info("all", "particle_position_z").sampling_type == "particle"
    )
    assert amr2._get_field_info("all", "particle_mass").sampling_type == "particle"
    assert not amr2._get_field_info("gas", "density").sampling_type == "particle"

    # Now perform similar checks, but with multiple particle types

    num_dm_particles = 30000
    xd = np.random.uniform(size=num_dm_particles)
    yd = np.random.uniform(size=num_dm_particles)
    zd = np.random.uniform(size=num_dm_particles)
    md = np.ones(num_dm_particles)

    num_star_particles = 20000
    xs = np.random.uniform(size=num_star_particles)
    ys = np.random.uniform(size=num_star_particles)
    zs = np.random.uniform(size=num_star_particles)
    ms = 2.0 * np.ones(num_star_particles)

    dens = np.random.random(domain_dims)

    fields3 = {
        "density": dens,
        ("dm", "particle_position_x"): xd,
        ("dm", "particle_position_y"): yd,
        ("dm", "particle_position_z"): zd,
        ("dm", "particle_mass"): md,
        ("star", "particle_position_x"): xs,
        ("star", "particle_position_y"): ys,
        ("star", "particle_position_z"): zs,
        ("star", "particle_mass"): ms,
    }

    fields4 = fields3.copy()

    ug3 = load_uniform_grid(fields3, domain_dims, 1.0)
    ug4 = load_uniform_grid(fields4, domain_dims, 1.0, nprocs=8)

    # Check to make sure the number of particles is the same

    number_of_particles3 = np.sum([grid.NumberOfParticles for grid in ug3.index.grids])
    number_of_particles4 = np.sum([grid.NumberOfParticles for grid in ug4.index.grids])

    assert_equal(number_of_particles3, num_dm_particles + num_star_particles)
    assert_equal(number_of_particles3, number_of_particles4)

    for grid in ug4.index.grids:
        tot_parts = grid["dm", "particle_position_x"].size
        tot_parts += grid["star", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    # Check to make sure the fields have been defined correctly

    for ptype in ("dm", "star"):
        assert (
            ug3._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            ug3._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            ug3._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert ug3._get_field_info(ptype, "particle_mass").sampling_type == "particle"
        assert (
            ug4._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            ug4._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            ug4._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert ug4._get_field_info(ptype, "particle_mass").sampling_type == "particle"

    # Now refine this

    amr3 = refine_amr(ug3, rc, fo, 3)
    for field in sorted(ug3.field_list):
        assert field in amr3.field_list

    grid_data = []

    for grid in amr3.index.grids:

        data = dict(
            left_edge=grid.LeftEdge,
            right_edge=grid.RightEdge,
            level=grid.Level,
            dimensions=grid.ActiveDimensions,
        )

        for field in amr3.field_list:
            if field[0] not in ("all", "nbody"):
                data[field] = grid[field]

        grid_data.append(data)

    amr4 = load_amr_grids(grid_data, domain_dims)

    # Check everything again

    number_of_particles3 = [grid.NumberOfParticles for grid in amr3.index.grids]
    number_of_particles4 = [grid.NumberOfParticles for grid in amr4.index.grids]

    assert_equal(np.sum(number_of_particles3), num_star_particles + num_dm_particles)
    assert_equal(number_of_particles3, number_of_particles4)

    for ptype in ("dm", "star"):
        assert (
            amr3._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            amr3._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            amr3._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert amr3._get_field_info(ptype, "particle_mass").sampling_type == "particle"
        assert (
            amr4._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            amr4._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            amr4._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert amr4._get_field_info(ptype, "particle_mass").sampling_type == "particle"

    for grid in amr3.index.grids:
        tot_parts = grid["dm", "particle_position_x"].size
        tot_parts += grid["star", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    for grid in amr4.index.grids:
        tot_parts = grid["dm", "particle_position_x"].size
        tot_parts += grid["star", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles
Example #6
def test_stream_particles():
    num_particles = 100000
    domain_dims = (64, 64, 64)
    dens = np.random.random(domain_dims)
    x = np.random.uniform(size=num_particles)
    y = np.random.uniform(size=num_particles)
    z = np.random.uniform(size=num_particles)
    m = np.ones((num_particles))

    # Field operators and cell flagging methods

    fo = []
    fo.append(ic.TopHatSphere(0.1, [0.2, 0.3, 0.4], {"density": 2.0}))
    fo.append(ic.TopHatSphere(0.05, [0.7, 0.4, 0.75], {"density": 20.0}))
    rc = [fm.flagging_method_registry["overdensity"](1.0)]

    # Check that all of this runs ok without particles

    ug0 = load_uniform_grid({"density": dens}, domain_dims, 1.0, nprocs=8)
    amr0 = refine_amr(ug0, rc, fo, 3)

    grid_data = []

    for grid in amr0.index.grids:

        data = dict(left_edge=grid.LeftEdge,
                    right_edge=grid.RightEdge,
                    level=grid.Level,
                    dimensions=grid.ActiveDimensions,
                    number_of_particles=grid.NumberOfParticles)

        for field in amr0.field_list:

            data[field] = grid[field]

        grid_data.append(data)

    amr0 = load_amr_grids(grid_data, domain_dims, 1.0)

    # Now add particles

    fields1 = {
        "density": dens,
        "particle_position_x": x,
        "particle_position_y": y,
        "particle_position_z": z,
        "particle_mass": m,
        "number_of_particles": num_particles
    }

    fields2 = fields1.copy()

    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)

    # Check to make sure the number of particles is the same

    number_of_particles1 = np.sum(
        [grid.NumberOfParticles for grid in ug1.index.grids])
    number_of_particles2 = np.sum(
        [grid.NumberOfParticles for grid in ug2.index.grids])

    yield assert_equal, number_of_particles1, num_particles
    yield assert_equal, number_of_particles1, number_of_particles2

    # Check to make sure the fields have been defined correctly

    for ptype in ("all", "io"):
        assert ug1._get_field_info(ptype, "particle_position_x").particle_type
        assert ug1._get_field_info(ptype, "particle_position_y").particle_type
        assert ug1._get_field_info(ptype, "particle_position_z").particle_type
        assert ug1._get_field_info(ptype, "particle_mass").particle_type
    assert not ug1._get_field_info("gas", "density").particle_type

    for ptype in ("all", "io"):
        assert ug2._get_field_info(ptype, "particle_position_x").particle_type
        assert ug2._get_field_info(ptype, "particle_position_y").particle_type
        assert ug2._get_field_info(ptype, "particle_position_z").particle_type
        assert ug2._get_field_info(ptype, "particle_mass").particle_type
    assert not ug2._get_field_info("gas", "density").particle_type

    # Now refine this

    amr1 = refine_amr(ug1, rc, fo, 3)
    for field in sorted(ug1.field_list):
        yield assert_equal, (field in amr1.field_list), True

    grid_data = []

    for grid in amr1.index.grids:

        data = dict(left_edge=grid.LeftEdge,
                    right_edge=grid.RightEdge,
                    level=grid.Level,
                    dimensions=grid.ActiveDimensions,
                    number_of_particles=grid.NumberOfParticles)

        for field in amr1.field_list:

            data[field] = grid[field]

        grid_data.append(data)

    amr2 = load_amr_grids(grid_data, domain_dims, 1.0)

    # Check everything again

    number_of_particles1 = [
        grid.NumberOfParticles for grid in amr1.index.grids
    ]
    number_of_particles2 = [
        grid.NumberOfParticles for grid in amr2.index.grids
    ]

    yield assert_equal, np.sum(number_of_particles1), num_particles
    yield assert_equal, number_of_particles1, number_of_particles2

    assert amr1._get_field_info("all", "particle_position_x").particle_type
    assert amr1._get_field_info("all", "particle_position_y").particle_type
    assert amr1._get_field_info("all", "particle_position_z").particle_type
    assert amr1._get_field_info("all", "particle_mass").particle_type
    assert not amr1._get_field_info("gas", "density").particle_type

    assert amr2._get_field_info("all", "particle_position_x").particle_type
    assert amr2._get_field_info("all", "particle_position_y").particle_type
    assert amr2._get_field_info("all", "particle_position_z").particle_type
    assert amr2._get_field_info("all", "particle_mass").particle_type
    assert not amr2._get_field_info("gas", "density").particle_type
Example #7
def test_particle_generator():
    # First generate our dataset
    domain_dims = (32, 32, 32)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4.0 * np.ones(domain_dims)
    fields = {
        "density": (dens, "code_mass/code_length**3"),
        "temperature": (temp, "K")
    }
    ug = load_uniform_grid(fields, domain_dims, 1.0)
    fo = [
        ic.BetaModelSphere(1.0, 0.1, 0.5, [0.5, 0.5, 0.5], {"density": (10.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](4.0)]
    ds = refine_amr(ug, rc, fo, 3)

    # Now generate particles from density

    field_list = [
        ("io", "particle_position_x"),
        ("io", "particle_position_y"),
        ("io", "particle_position_z"),
        ("io", "particle_index"),
        ("io", "particle_gas_density"),
    ]
    num_particles = 10000
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles,
                                              field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)

    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
    particles_per_grid1 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid1, particles1.NumberOfParticles)

    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert np.unique(tags).size == num_particles

    del tags

    # Set up a lattice of particles
    pdims = np.array([32, 32, 32])

    def new_indices():
        # We just add new indices onto the existing ones
        return np.arange((np.product(pdims))) + num_particles

    le = np.array([0.25, 0.25, 0.25])
    re = np.array([0.75, 0.75, 0.75])

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(field_dict)

    # Test lattice positions
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0], re[0], num=pdims[0], endpoint=True)
    ypred = np.linspace(le[1], re[1], num=pdims[1], endpoint=True)
    zpred = np.linspace(le[2], re[2], num=pdims[2], endpoint=True)

    assert_almost_equal(xpos, xpred)
    assert_almost_equal(ypos, ypred)
    assert_almost_equal(zpos, zpred)

    del xpos, ypos, zpos
    del xpred, ypred, zpred

    # Test the number of particles again
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid2,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)

    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid2,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)

    # Test the uniqueness of tags
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    assert_equal(tags, np.arange((np.product(pdims) + num_particles)))

    del tags

    # Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in field_list:
        pdata[field] = dd[field]

    # Test the "from-list" generator and particle field overwrite
    num_particles3 = num_particles + np.product(pdims)
    particles3 = FromListParticleGenerator(ds, num_particles3, pdata)
    particles3.apply_to_stream(overwrite=True)

    # Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid3,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)
    particles_per_grid2 = [
        len(grid["particle_position_z"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid3,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)
    assert_equal(particles_per_grid2, particles_per_grid3)

    # Test adding in particles with a different particle type

    num_star_particles = 20000
    pdata2 = {
        ("star", "particle_position_x"):
        np.random.uniform(size=num_star_particles),
        ("star", "particle_position_y"):
        np.random.uniform(size=num_star_particles),
        ("star", "particle_position_z"):
        np.random.uniform(size=num_star_particles),
    }

    particles4 = FromListParticleGenerator(ds,
                                           num_star_particles,
                                           pdata2,
                                           ptype="star")
    particles4.apply_to_stream()

    dd = ds.all_data()
    assert dd["star", "particle_position_x"].size == num_star_particles
    assert dd["io", "particle_position_x"].size == num_particles3
    assert (
        dd["all", "particle_position_x"].size
        == num_star_particles + num_particles3
    )

    del pdata
    del pdata2
    del ds
    del particles1
    del particles2
    del particles4
    del fields
    del dens
    del temp
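Outside a test harness, the generator workflow these examples exercise is: build a stream dataset, construct a generator, optionally map grid fields onto the particles, and call apply_to_stream() to deposit them onto the grids. A minimal sketch under the same yt 3.x assumptions (the dimensions, field names, and particle count here are arbitrary illustrations, not values from the original tests):

import numpy as np

from yt.frontends.stream.api import load_uniform_grid
from yt.utilities.particle_generator import FromListParticleGenerator

# A small uniform-grid stream dataset with a single gas field.
dims = (16, 16, 16)
ds = load_uniform_grid({"density": np.ones(dims)}, dims, 1.0)

# Scatter a handful of particles uniformly through the unit domain.
n = 128
pdata = {
    ("io", "particle_position_x"): np.random.uniform(size=n),
    ("io", "particle_position_y"): np.random.uniform(size=n),
    ("io", "particle_position_z"): np.random.uniform(size=n),
}

gen = FromListParticleGenerator(ds, n, pdata)
gen.apply_to_stream()  # writes the particles back onto the stream grids

print(ds.all_data()["io", "particle_position_x"].size)  # expect 128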