# Imports gathered for the snippets in this section. SharedMemory,
# BlockfsStack, VExtent, V, Directory, READ_FUNCTION_T, read_block,
# write_plane and parse_args come from the surrounding package and are
# assumed to be in scope; QApplication is assumed to come from the
# project's Qt binding (e.g. PyQt5.QtWidgets).
import itertools
import multiprocessing
import os
import sys
import typing

import h5py
import numpy as np
import tifffile
import tqdm


def test_subprocess_read(self):
    # Fill the shared memory in this process, then read one element
    # back from a single worker subprocess.
    shm = SharedMemory((100, 100), np.uint16)
    a = np.random.RandomState(1234).randint(0, 65535, (100, 100))
    with shm.txn() as m:
        m[:] = a
    with multiprocessing.Pool(1) as pool:
        result = pool.apply(read_something, (shm, (55, 33)))
    self.assertEqual(a[55, 33], result)
def test_subprocess_write(self):
    # Have a worker subprocess write one element, then check that the
    # write is visible through the parent's view of the shared memory.
    shm = SharedMemory((100, 100), np.uint16)
    with shm.txn() as m:
        m[:] = 0
    with multiprocessing.Pool(1) as pool:
        pool.apply(write_something, (shm, (40, 50), 89))
    with shm.txn() as m:
        self.assertEqual(m[40, 50], 89)
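# A minimal sketch of the SharedMemory class these tests assume: an
# ndarray-shaped block of shared memory whose txn() context manager
# yields a NumPy view, and which can be passed to Pool workers. The
# name SharedMemorySketch and everything inside it are assumptions
# about the real class, not its actual implementation; this version
# rides on Python 3.8's multiprocessing.shared_memory and reattaches
# to the block by name on the far side of a pickle.
import contextlib
from multiprocessing import shared_memory


class SharedMemorySketch:
    def __init__(self, shape, dtype):
        self.shape = tuple(shape)
        self.dtype = np.dtype(dtype)
        nbytes = int(np.prod(self.shape)) * self.dtype.itemsize
        self._shm = shared_memory.SharedMemory(create=True, size=nbytes)
        self._name = self._shm.name

    @contextlib.contextmanager
    def txn(self):
        # (Re)attach lazily so views work in any process.
        if self._shm is None:
            self._shm = shared_memory.SharedMemory(name=self._name)
        yield np.ndarray(self.shape, self.dtype, buffer=self._shm.buf)

    def __getstate__(self):
        # Ship only the block's name plus the array geometry; workers
        # reattach on their first txn().
        return self._name, self.shape, self.dtype

    def __setstate__(self, state):
        self._name, self.shape, self.dtype = state
        self._shm = None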
def read_patches(patches_file, model):
    # Load six HDF5 datasets into shared memory, fanning the reads out
    # over a worker pool in ~1% chunks along the first axis while
    # keeping the Qt event loop responsive.
    fields = ("patches_xy", "patches_xz", "patches_yz", "x", "y", "z")
    shms = []
    futures = []
    with h5py.File(patches_file, "r") as fd:
        for field in fields:
            shms.append(SharedMemory(fd[field].shape, fd[field].dtype))
    increment = max(1, shms[0].shape[0] // 100)
    with multiprocessing.Pool(model.n_workers.get()) as pool:
        for field, shm in zip(fields, shms):
            for i0 in range(0, shm.shape[0], increment):
                i1 = min(i0 + increment, shm.shape[0])
                futures.append(pool.apply_async(
                    read_array, (shm, patches_file, field, i0, i1)))
        for future in tqdm.tqdm(futures):
            # Poll with a short timeout so GUI events keep pumping
            # while the workers read.
            while True:
                try:
                    future.get(0.25)
                    break
                except multiprocessing.TimeoutError:
                    QApplication.processEvents()
    results = []
    for shm in shms:
        with shm.txn() as memory:
            results.append(memory.copy())
    return results
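# Hypothetical call site for read_patches; the HDF5 file name is made
# up, and model is assumed to expose an n_workers property with .get():
#     patches_xy, patches_xz, patches_yz, x, y, z = read_patches(
#         "patches.h5", model)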
def convert_to_tif_and_blockfs(
        precomputed_path,
        output_pattern: str,
        volume: VExtent = None,
        dtype=None,
        compression=4,
        cores=multiprocessing.cpu_count(),
        io_cores=multiprocessing.cpu_count(),
        voxel_size=(1800, 1800, 2000),
        n_levels: int = 5):
    if volume is None:
        volume = V.volume
    if dtype is None:
        dtype = V.dtype
    blockfs_stack = BlockfsStack(volume.shape, precomputed_path)
    blockfs_stack.write_info_file(n_levels, voxel_size)
    directory = blockfs_stack.make_l1_directory(io_cores)
    directory.create()
    directory.start_writer_processes()
    # One z-block's worth of planes is staged in shared memory.
    sm = SharedMemory((directory.z_block_size,
                       volume.y1 - volume.y0,
                       volume.x1 - volume.x0), dtype)
    with multiprocessing.Pool(cores) as pool:
        for z0 in tqdm.tqdm(
                range(volume.z0, volume.z1, directory.z_block_size)):
            z1 = min(volume.z1, z0 + directory.z_block_size)
            # Each worker reads one plane, writes it as a TIFF and
            # deposits it into the shared-memory staging block.
            futures = []
            for z in range(z0, z1):
                futures.append(pool.apply_async(
                    do_plane,
                    (volume, z0, z, sm, output_pattern % z, compression)))
            for future in futures:
                future.get()
            # Carve the staged planes into blockfs blocks.
            x0 = np.arange(0, sm.shape[2], directory.x_block_size)
            x1 = np.minimum(sm.shape[2], x0 + directory.x_block_size)
            y0 = np.arange(0, sm.shape[1], directory.y_block_size)
            y1 = np.minimum(sm.shape[1], y0 + directory.y_block_size)
            with sm.txn() as memory:
                for (x0a, x1a), (y0a, y1a) in itertools.product(
                        zip(x0, x1), zip(y0, y1)):
                    directory.write_block(memory[:z1 - z0, y0a:y1a, x0a:x1a],
                                          x0a, y0a, z0)
    directory.close()
    for level in range(2, n_levels + 1):
        blockfs_stack.write_level_n(level, n_cores=io_cores)
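# Hypothetical invocation of the converter above; both paths are made
# up and the remaining parameters keep their defaults:
#     convert_to_tif_and_blockfs("/data/stack.precomputed",
#                                "/data/tiff/img_%04d.tiff")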
def do_plane(volume: VExtent, z0: int, z: int, sm: SharedMemory,
             path: str, compression: int):
    # Read one plane of the volume, save it as a TIFF and stash it at
    # its offset within the shared-memory staging block.
    mini_volume = VExtent(
        volume.x0, volume.x1, volume.y0, volume.y1, z, z + 1)
    plane = V.imread(mini_volume, sm.dtype)[0]
    dir_path = os.path.dirname(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path, exist_ok=True)
    tifffile.imsave(path, plane, compress=compression)
    with sm.txn() as memory:
        memory[z - z0] = plane
def main(args=sys.argv[1:]):
    global DIRECTORY
    opts = parse_args(args)
    DIRECTORY = Directory.open(opts.input)
    # Cap the z-block size so one slab of planes fits in the memory
    # budget (opts.memory is in GB; 2 bytes per voxel).
    mem_z_block_size = (opts.memory * 1000 * 1000 * 1000 //
                        DIRECTORY.y_extent // DIRECTORY.x_extent // 2)
    z_block_size = min(DIRECTORY.z_block_size, mem_z_block_size)
    shm = SharedMemory((z_block_size,
                        DIRECTORY.y_extent,
                        DIRECTORY.x_extent), DIRECTORY.dtype)
    dirnames = set()
    for z0 in range(0, DIRECTORY.z_extent, z_block_size):
        with multiprocessing.Pool(opts.n_workers) as pool:
            z1 = min(z0 + z_block_size, DIRECTORY.z_extent)
            # Read every block that intersects this z-slab into the
            # shared memory...
            yr = range(0, DIRECTORY.y_extent, DIRECTORY.y_block_size)
            xr = range(0, DIRECTORY.x_extent, DIRECTORY.x_block_size)
            futures = []
            for x0, y0 in itertools.product(xr, yr):
                x1 = min(x0 + DIRECTORY.x_block_size, DIRECTORY.x_extent)
                y1 = min(y0 + DIRECTORY.y_block_size, DIRECTORY.y_extent)
                futures.append(
                    pool.apply_async(
                        read_block,
                        (shm, 0, 0, z0, x0, x1, y0, y1, z0, z1)))
            for future in tqdm.tqdm(futures,
                                    desc="Reading %d:%d" % (z0, z1),
                                    disable=opts.silent):
                future.get()
            # ...then write each plane of the slab out as a TIFF.
            futures = []
            for z in range(z0, z1):
                path = opts.output_pattern % z
                dirname = os.path.dirname(path)
                if dirname not in dirnames:
                    if not os.path.exists(dirname):
                        os.makedirs(dirname)
                    dirnames.add(dirname)
                futures.append(
                    pool.apply_async(write_plane,
                                     (shm, path, z - z0, opts.psnr)))
            for future in tqdm.tqdm(futures,
                                    desc="Writing %d:%d" % (z0, z1),
                                    disable=opts.silent):
                future.get()
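# A sketch of the parser main() expects; the option names mirror the
# attributes used above (opts.input, opts.output_pattern, opts.memory,
# opts.n_workers, opts.psnr, opts.silent). Defaults and help text are
# assumptions, as is the parse_args_sketch name.
import argparse


def parse_args_sketch(args):
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True,
                        help="blockfs directory file to read")
    parser.add_argument("--output-pattern", required=True,
                        help="TIFF path per plane, e.g. img_%%04d.tiff")
    parser.add_argument("--memory", type=int, default=16,
                        help="memory budget in GB for one z-slab")
    parser.add_argument("--n-workers", type=int,
                        default=multiprocessing.cpu_count())
    parser.add_argument("--psnr", type=float, default=80.0,
                        help="target PSNR passed to write_plane")
    parser.add_argument("--silent", action="store_true")
    return parser.parse_args(args)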
class PlaneR:
    """A single image plane, lazily backed by shared memory."""

    def __init__(self, z: int, path: str,
                 shape: typing.Sequence[int], dtype: np.dtype,
                 read_fn: READ_FUNCTION_T = tifffile.imread):
        self.z = z
        self.path = path
        self.shape = shape
        self.dtype = dtype
        self.memory = None
        self.read_fn = read_fn

    def prepare(self):
        # Allocate the backing shared memory on first use.
        if self.memory is None:
            self.memory = SharedMemory(self.shape, self.dtype)

    def read(self):
        with self.memory.txn() as m:
            m[:] = self.read_fn(self.path)
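# Hypothetical use of PlaneR; the path and shape are made up:
#     plane = PlaneR(0, "/data/tiff/img_0000.tiff", (2048, 2048),
#                    np.dtype(np.uint16))
#     plane.prepare()   # allocate backing shared memory once
#     plane.read()      # load the TIFF into the shared block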
def write_something(shm: SharedMemory, idx, value):
    # Pool worker: write a single element through a shared-memory view.
    with shm.txn() as m:
        m[idx] = value
def read_something(shm: SharedMemory, idx):
    # Pool worker: read a single element through a shared-memory view.
    with shm.txn() as m:
        return m[idx]
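# The two helpers above are the Pool workers used by the subprocess
# tests at the top of this section; an in-process smoke test (no Pool
# needed, since txn() views work in the parent too) would be:
#     shm = SharedMemory((4, 4), np.uint8)
#     write_something(shm, (1, 2), 7)
#     assert read_something(shm, (1, 2)) == 7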
def read_array(shm: SharedMemory, hdf_file, dataset, i0, i1):
    # Pool worker: copy rows i0:i1 of an HDF5 dataset into the
    # corresponding rows of the shared-memory array.
    with shm.txn() as memory:
        with h5py.File(hdf_file, "r") as fd:
            memory[i0:i1] = fd[dataset][i0:i1]