def test_memdisk(self):
    """Compare a disk-backed MemDiskGroup against an in-memory copy.

    Opens the test file both directly (disk-backed) and via an in-memory
    MemGroup, then checks that keys, recursive indexing, dataset contents,
    and newly created nested groups/datasets agree between the two backings.
    """
    # Group backed directly by the HDF5 file on disk.
    f = memh5.MemDiskGroup(self.fname)
    self.assertEqual(set(f.keys()), set(f._data.keys()))
    # Group backed by an in-memory copy of the same file.
    m = memh5.MemDiskGroup(memh5.MemGroup.from_hdf5(self.fname))
    self.assertEqual(set(m.keys()), set(f.keys()))
    # Recursive indexing: nested paths and re-rooting ('/') must match.
    self.assertEqual(set(f['/level1/'].keys()), set(m['/level1/'].keys()))
    self.assertEqual(set(f.keys()), set(m['/level1']['/'].keys()))
    self.assertTrue(np.all(f['/level1/large'][:] == m['/level1/large']))
    # Creating deeply nested groups and datasets must behave identically
    # for both backings (datasets created for their side effect only).
    gf = f.create_group('/level1/level2/level3/')
    gf.create_dataset('new', data=np.arange(5))
    gm = m.create_group('/level1/level2/level3/')
    gm.create_dataset('new', data=np.arange(5))
    self.assertTrue(np.all(f['/level1/level2/level3/new'][:]
                           == m['/level1/level2/level3/new'][:]))
def test_misc(self):
    """Exercise distributed MemDiskGroup creation, save/load, and h5py refusal.

    Creates a distributed dataset, saves and reloads it, checks the data
    survives the round trip, and verifies that a distributed MemDiskGroup
    refuses to wrap a live h5py object.
    """
    dg = memh5.MemDiskGroup(distributed=True)
    pdset = dg.create_dataset('parallel_data', shape=(10,), dtype=np.float64,
                              distributed=True, distributed_axis=0)
    # Each MPI rank writes its own rank number into its local section.
    pdset[:] = rank
    # Test the dataset was successfully added.
    self.assertIn('parallel_data', dg)
    dg.save(self.fname)
    dg2 = memh5.MemDiskGroup.from_file(self.fname, distributed=True)
    # Test successful load: the dataset survives a save/load round trip.
    self.assertIn('parallel_data', dg2)
    self.assertTrue((dg['parallel_data'][:] == dg2['parallel_data'][:]).all())
    # Test refusal to base off an h5py object when distributed.
    from caput import mpiutil
    with h5py.File(self.fname, 'r') as f:
        if comm is not None:
            self.assertRaises(ValueError, memh5.MemDiskGroup,
                              data_group=f, distributed=True)
    # Synchronize ranks before leaving so no rank races ahead of the check.
    mpiutil.barrier()