def test_subprocess_write(self):
    shm = SharedMemory((100, 100), np.uint16)
    with shm.txn() as m:
        m[:] = 0
    with multiprocessing.Pool(1) as pool:
        pool.apply(write_something, (shm, (40, 50), 89))
    with shm.txn() as m:
        self.assertEqual(m[40, 50], 89)

def test_subprocess_read(self):
    shm = SharedMemory((100, 100), np.uint16)
    a = np.random.RandomState(1234).randint(0, 65535, (100, 100))
    with shm.txn() as m:
        m[:] = a
    with multiprocessing.Pool(1) as pool:
        result = pool.apply(read_something, (shm, (55, 33)))
    self.assertEqual(a[55, 33], result)
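The two tests above only require that a SharedMemory instance survive pickling into a pool worker and that txn() expose the buffer as a NumPy array. As a rough sketch of what such a wrapper can look like, here is a hypothetical stand-in built on Python 3.8's multiprocessing.shared_memory; the class name, the lock-free txn(), and the pickling strategy are all assumptions, not the library's actual implementation:

import contextlib
import numpy as np
from multiprocessing import shared_memory

class MiniSharedMemory:
    """Hypothetical stand-in for the SharedMemory wrapper used above."""

    def __init__(self, shape, dtype):
        self.shape = tuple(shape)
        self.dtype = np.dtype(dtype)
        size = int(np.prod(self.shape)) * self.dtype.itemsize
        self._block = shared_memory.SharedMemory(create=True, size=size)

    @contextlib.contextmanager
    def txn(self):
        # The real class presumably guards this view with a lock;
        # here it simply exposes the buffer as a NumPy array.
        yield np.ndarray(self.shape, self.dtype, buffer=self._block.buf)

    def __reduce__(self):
        # Pickle only (name, shape, dtype); a worker re-attaches by name,
        # so this works under both fork and spawn start methods.
        return _attach, (self._block.name, self.shape, self.dtype)

def _attach(name, shape, dtype):
    obj = MiniSharedMemory.__new__(MiniSharedMemory)
    obj.shape, obj.dtype = shape, dtype
    obj._block = shared_memory.SharedMemory(name=name)
    return obj

Cleanup (unlinking the block when the last user is done) is omitted for brevity.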
Example #3
def do_plane(volume: VExtent,
             z0: int,
             z: int,
             sm: SharedMemory,
             path: str,
             compression: int):
    # Read a single Z plane out of the source volume.
    mini_volume = VExtent(
        volume.x0, volume.x1, volume.y0, volume.y1, z, z + 1)
    plane = V.imread(mini_volume, sm.dtype)[0]
    # Write the plane as a TIFF, creating its directory if needed.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    tifffile.imsave(path, plane, compress=compression)
    # Stash the plane in shared memory at its offset within the Z block.
    with sm.txn() as memory:
        memory[z - z0] = plane
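A note on the TIFF call: imsave(..., compress=...) is the legacy tifffile spelling and matches the version this example was written against. Recent tifffile releases renamed both pieces; if you adapt the example, the equivalent call is roughly the following (parameter names per current tifffile documentation, worth verifying against your installed version):

import numpy as np
import tifffile

plane = np.zeros((64, 64), np.uint16)  # stand-in plane
# Legacy spelling, as used above:  tifffile.imsave(path, plane, compress=4)
# Current spelling:
tifffile.imwrite("plane_0000.tif", plane,
                 compression="zlib", compressionargs={"level": 4})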
Example #4
def convert_to_tif_and_blockfs(
        precomputed_path,
        output_pattern: str,
        volume: VExtent = None,
        dtype=None,
        compression=4,
        cores=multiprocessing.cpu_count(),
        io_cores=multiprocessing.cpu_count(),
        voxel_size=(1800, 1800, 2000),
        n_levels: int = 5):
    if volume is None:
        volume = V.volume
    if dtype is None:
        dtype = V.dtype

    blockfs_stack = BlockfsStack(volume.shape, precomputed_path)
    blockfs_stack.write_info_file(n_levels, voxel_size)
    directory = blockfs_stack.make_l1_directory(io_cores)
    directory.create()
    directory.start_writer_processes()
    # One shared buffer holds a full Z block of planes.
    sm = SharedMemory((directory.z_block_size,
                       volume.y1 - volume.y0,
                       volume.x1 - volume.x0), dtype)
    with multiprocessing.Pool(cores) as pool:
        for z0 in tqdm.tqdm(
                range(volume.z0, volume.z1, directory.z_block_size)):
            z1 = min(volume.z1, z0 + directory.z_block_size)
            # Read and TIFF-write each plane of the block in parallel.
            futures = []
            for z in range(z0, z1):
                futures.append(pool.apply_async(
                    do_plane,
                    (volume, z0, z, sm, output_pattern % z, compression)))
            for future in futures:
                future.get()
            # Tile the assembled block into blockfs chunks; np.minimum
            # clips the last, possibly partial, block on each axis.
            x0 = np.arange(0, sm.shape[2], directory.x_block_size)
            x1 = np.minimum(sm.shape[2], x0 + directory.x_block_size)
            y0 = np.arange(0, sm.shape[1], directory.y_block_size)
            y1 = np.minimum(sm.shape[1], y0 + directory.y_block_size)
            with sm.txn() as memory:
                for (x0a, x1a), (y0a, y1a) in itertools.product(
                        zip(x0, x1), zip(y0, y1)):
                    directory.write_block(memory[:z1 - z0, y0a:y1a, x0a:x1a],
                                          x0a, y0a, z0)
    directory.close()
    # Build the downsampled pyramid levels (level 1 was just written).
    for level in range(2, n_levels + 1):
        blockfs_stack.write_level_n(level, n_cores=io_cores)
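The tiling arithmetic in the inner loop is compact but worth isolating: np.arange yields the block starts and np.minimum clips each block end to the volume edge, so the final partial block needs no special case. A standalone sketch of the same pattern with made-up sizes:

import itertools
import numpy as np

extent, block = 100, 32                    # hypothetical axis length / block size
starts = np.arange(0, extent, block)       # array([ 0, 32, 64, 96])
ends = np.minimum(extent, starts + block)  # array([ 32, 64, 96, 100])
for (x0a, x1a), (y0a, y1a) in itertools.product(
        zip(starts, ends), zip(starts, ends)):
    pass  # each (y0a:y1a, x0a:x1a) tile covers the plane exactly once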
Example #5
class PlaneR:
    def __init__(self,
                 z: int,
                 path: str,
                 shape: typing.Sequence[int],
                 dtype: np.dtype,
                 read_fn: READ_FUNCTION_T = tifffile.imread):
        self.z = z
        self.path = path
        self.shape = shape
        self.dtype = dtype
        self.memory = None
        self.read_fn = read_fn

    def prepare(self):
        # Allocate the shared buffer lazily, once per plane.
        if self.memory is None:
            self.memory = SharedMemory(self.shape, self.dtype)

    def read(self):
        # Fill the shared buffer with the plane's pixels.
        with self.memory.txn() as m:
            m[:] = self.read_fn(self.path)
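A minimal single-process use of PlaneR, assuming the SharedMemory class above and a hypothetical temp file; in the real pipeline prepare() runs in the parent and read() in a pool worker:

import numpy as np
import tifffile

tifffile.imwrite("/tmp/plane_0000.tif", np.ones((100, 100), np.uint16))
plane = PlaneR(z=0, path="/tmp/plane_0000.tif",
               shape=(100, 100), dtype=np.uint16)
plane.prepare()                 # parent allocates the shared buffer
plane.read()                    # a worker would normally do this
with plane.memory.txn() as m:   # parent sees the pixels the worker wrote
    assert m[0, 0] == 1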

def write_something(shm: SharedMemory, idx, value):
    with shm.txn() as m:
        m[idx] = value

def read_something(shm: SharedMemory, idx):
    with shm.txn() as m:
        return m[idx]

def read_array(shm: SharedMemory, hdf_file, dataset, i0, i1):
    # Copy rows [i0:i1) of the HDF5 dataset into the shared buffer.
    with shm.txn() as memory:
        with h5py.File(hdf_file, "r") as fd:
            memory[i0:i1] = fd[dataset][i0:i1]
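read_array is shaped for exactly this kind of fan-out: split the first axis into contiguous slabs and let each worker copy its slab straight into the shared buffer. A sketch under that assumption (parallel_load and n_chunks are hypothetical names, not part of the library):

import multiprocessing

import numpy as np

def parallel_load(shm: SharedMemory, hdf_file: str, dataset: str,
                  n_chunks: int = 4):
    # Evenly split the first axis into n_chunks contiguous slabs.
    bounds = np.linspace(0, shm.shape[0], n_chunks + 1).astype(int)
    with multiprocessing.Pool(n_chunks) as pool:
        pool.starmap(read_array,
                     [(shm, hdf_file, dataset, i0, i1)
                      for i0, i1 in zip(bounds[:-1], bounds[1:])])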