Example no. 1
0
def test_H5FileSelect_distributed(container_on_disk, fsel, isel):
    """Load H5 into parallel container while down-selecting axes."""

    selections = {
        "dset1": (fsel, isel, slice(None)),
        "dset2": (fsel, slice(None)),
    }

    # The suite is only designed to run on 1, 2 or 4 MPI processes.
    assert 4 % comm.size == 0

    m = MemGroup.from_hdf5(
        container_on_disk, selections=selections, distributed=True, comm=comm
    )

    # Build the serial reference answer from the global test datasets.
    expected1 = dset1[(fsel, isel, slice(None))]
    expected2 = dset2[(fsel, slice(None))]

    # Work out which rows of the selected data land on this rank.
    _, start, end = mpiutil.split_local(expected1.shape[0], comm=comm)
    local = slice(start, end)

    # For debugging: dereference m["dset1"][:] / m["dset2"][:] (collective!)
    # before looping over ranks, then print per-rank shapes and corners
    # inside a comm.Barrier()-separated loop.

    assert np.all(m["dset1"][:] == expected1[local])
    assert np.all(m["dset2"][:] == expected2[local])
Example no. 2
0
def container_on_disk():
    """Write the test datasets to a temporary HDF5 file and yield its name."""
    fname = "tmp_test_memh5_select.h5"

    group = MemGroup()
    group.create_dataset("dset1", data=dset1.view())
    group.create_dataset("dset2", data=dset2.view())
    group.to_hdf5(fname)

    yield fname

    # Tear down: remove the file plus any same-prefix siblings.
    for leftover in glob.glob(fname + "*"):
        os.remove(leftover)
Example no. 3
0
def container_on_disk():
    """Write the distributed test file from rank 0 and yield its name."""

    fname = "tmp_test_memh5_select_parallel.h5"

    # Only rank 0 writes the file; the datasets are wrapped on
    # MPI.COMM_SELF so a single process holds the full arrays.
    if comm.rank == 0:
        arr1 = mpiarray.MPIArray.wrap(dset1, axis=0, comm=MPI.COMM_SELF)
        arr2 = mpiarray.MPIArray.wrap(dset2, axis=0, comm=MPI.COMM_SELF)

        group = MemGroup(distributed=True, comm=MPI.COMM_SELF)
        group.create_dataset("dset1", data=arr1, distributed=True)
        group.create_dataset("dset2", data=arr2, distributed=True)
        group.to_hdf5(fname)

    # Make sure the file exists before any rank tries to read it.
    comm.Barrier()

    yield fname

    # Make sure every rank is done with the file before deleting it.
    comm.Barrier()

    # Tear down (rank 0 only): remove the file and any same-prefix siblings.
    if comm.rank == 0:
        for leftover in glob.glob(fname + "*"):
            os.remove(leftover)
Example no. 4
0
def test_H5FileSelect(container_on_disk):
    """Tests that makes hdf5 objects and tests selecting on their axes."""

    loaded = MemGroup.from_hdf5(container_on_disk, selections=sel)

    expected1 = dset1[(fsel, isel, slice(None))]
    expected2 = dset2[(fsel, slice(None))]

    assert np.all(loaded["dset1"][:] == expected1)
    assert np.all(loaded["dset2"][:] == expected2)
Example no. 5
0
def acq_list():
    """Yield every acquisition file loaded into an in-memory group."""
    yield list(map(MemGroup.from_hdf5, acq_fname_list))
Example no. 6
0
def test_stores():
    """Check that data loaded with an explicit out_group lives in that group."""
    out_group = MemGroup()
    data = andata.AnData.from_acq_h5(acq_fname_list, out_group=out_group)

    # The loaded object must be backed by exactly the group we supplied.
    assert data._data is out_group
    assert "vis" in out_group
    assert out_group["vis"].name == data.vis.name
Example no. 7
0
def test_H5FileSelect_distributed(container_on_disk):
    """Load H5 into parallel container while down-selecting axes."""

    loaded = MemGroup.from_hdf5(
        container_on_disk, selections=sel, distributed=True
    )

    expected1 = dset1[(fsel, isel, slice(None))]
    expected2 = dset2[(fsel, slice(None))]

    assert np.all(loaded["dset1"][:] == expected1)
    assert np.all(loaded["dset2"][:] == expected2)