Example #1
def test_ndarray_fs():
    location = 'cloudvolume-shm-test-ndarray'
    array_like, array = shm.ndarray_fs(shape=(2, 2, 2),
                                       dtype=np.uint8,
                                       location=location,
                                       lock=None,
                                       emulate_shm=True)
    assert np.all(array == np.zeros(shape=(2, 2, 2), dtype=np.uint8))
    array[:] = 100
    array_like.close()

    array_like, array = shm.ndarray_fs(shape=(2, 2, 2),
                                       dtype=np.uint8,
                                       location=location,
                                       lock=None,
                                       emulate_shm=True)
    assert np.all(array[:] == 100)
    array_like.close()

    full_loc = os.path.join(shm.EMULATED_SHM_DIRECTORY, location)

    assert os.path.exists(full_loc)
    assert os.path.getsize(full_loc) == 8

    assert shm.unlink_fs(location) == True
    assert shm.unlink_fs(location) == False

    try:
        array_like, array = shm.ndarray_fs(shape=(2, 2, 2),
                                           dtype=np.uint8,
                                           location=location,
                                           lock=None,
                                           readonly=True,
                                           emulate_shm=True)
        assert False
    except shm.SharedMemoryReadError:
        pass

    array_like, array = shm.ndarray_fs(shape=(2, 2, 2),
                                       dtype=np.uint8,
                                       location=location,
                                       lock=None,
                                       emulate_shm=True)
    try:
        array_like, array = shm.ndarray_fs(shape=(200, 200, 200),
                                           dtype=np.uint8,
                                           location=location,
                                           lock=None,
                                           readonly=True,
                                           emulate_shm=True)
        assert False
    except shm.SharedMemoryReadError:
        pass

    assert shm.unlink_fs(location) == True
    assert shm.unlink_fs(location) == False

    assert not os.path.exists(full_loc)
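
For reference, the API exercised by this test can be reused outside of a test. The sketch below assumes shm is CloudVolume's cloudvolume.sharedmemory module (as the names ndarray_fs, unlink_fs, and EMULATED_SHM_DIRECTORY suggest); the buffer name is arbitrary, and the call signatures are copied from the assertions above.

import numpy as np
from cloudvolume import sharedmemory as shm  # assumed import path

location = 'my-shared-buffer'  # arbitrary name for the backing buffer

# Create (or attach to) an emulated shared memory buffer and view it as a numpy array.
handle, arr = shm.ndarray_fs(shape=(2, 2, 2), dtype=np.uint8,
                             location=location, lock=None, emulate_shm=True)
arr[:] = 100          # writes go straight into the backing buffer
handle.close()        # close the handle; the backing file persists

# Reattach later (possibly from another process) and read the data back.
handle, arr = shm.ndarray_fs(shape=(2, 2, 2), dtype=np.uint8,
                             location=location, lock=None, emulate_shm=True)
assert np.all(arr == 100)
handle.close()

# Remove the backing file when done; returns False if it was already gone.
shm.unlink_fs(location)
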
Example #2
def child_process_download(meta, cache, mip, compress_cache, dest_bbox,
                           fill_missing, progress, location, use_shared_memory,
                           green, cloudpaths):
    reset_connection_pools()  # otherwise multi-process hangs

    shape = list(dest_bbox.size3()) + [meta.num_channels]

    if use_shared_memory:
        array_like, dest_img = shm.ndarray(shape,
                                           dtype=meta.dtype,
                                           location=location,
                                           lock=fs_lock)
    else:
        array_like, dest_img = shm.ndarray_fs(shape,
                                              dtype=meta.dtype,
                                              location=location,
                                              emulate_shm=False,
                                              lock=fs_lock)

    def process(src_img, src_bbox):
        shade(dest_img, dest_bbox, src_img, src_bbox)

    download_chunks_threaded(meta,
                             cache,
                             mip,
                             cloudpaths,
                             fn=process,
                             fill_missing=fill_missing,
                             progress=progress,
                             compress_cache=compress_cache,
                             green=green)

    array_like.close()
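
Example #2 works because every worker process attaches to the same buffer, named by location, and shades its chunks into one destination array. As a rough stdlib-only analogue of that pattern, using Python's multiprocessing.shared_memory rather than CloudVolume's shm module, the sketch below has a parent create a named block and several child processes fill their own rows of it.

import numpy as np
from multiprocessing import Process, shared_memory

def fill_row(name, shape, dtype, row, value):
    # Attach to the parent's block by name and view it as a numpy array.
    block = shared_memory.SharedMemory(name=name)
    arr = np.ndarray(shape, dtype=dtype, buffer=block.buf)
    arr[row, :] = value  # each worker writes only its own slice
    block.close()

if __name__ == '__main__':
    shape, dtype = (4, 4), np.uint8
    nbytes = int(np.prod(shape)) * np.dtype(dtype).itemsize
    parent = shared_memory.SharedMemory(create=True, size=nbytes)
    dest = np.ndarray(shape, dtype=dtype, buffer=parent.buf)
    dest[:] = 0

    procs = [Process(target=fill_row, args=(parent.name, shape, dtype, i, i + 1))
             for i in range(shape[0])]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

    print(dest)      # each row was filled by a different process
    parent.close()
    parent.unlink()  # remove the named block, analogous to shm.unlink(location)
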
Example #3
def multiprocess_download(requested_bbox,
                          mip,
                          cloudpaths,
                          meta,
                          cache,
                          compress_cache,
                          fill_missing,
                          progress,
                          parallel,
                          location,
                          retain,
                          use_shared_memory,
                          order,
                          green,
                          secrets=None,
                          background_color=0):

    cloudpaths_by_process = []
    length = int(math.ceil(len(cloudpaths) / float(parallel)) or 1)
    for i in range(0, len(cloudpaths), length):
        cloudpaths_by_process.append(cloudpaths[i:i + length])

    cpd = partial(child_process_download, meta, cache, mip, compress_cache,
                  requested_bbox, fill_missing, progress, location,
                  use_shared_memory, green, secrets, background_color)
    parallel_execution(cpd,
                       cloudpaths_by_process,
                       parallel,
                       cleanup_shm=location)

    shape = list(requested_bbox.size3()) + [meta.num_channels]

    if use_shared_memory:
        mmap_handle, renderbuffer = shm.ndarray(shape,
                                                dtype=meta.dtype,
                                                order=order,
                                                location=location,
                                                lock=fs_lock)
    else:
        mmap_handle, renderbuffer = shm.ndarray_fs(shape,
                                                   dtype=meta.dtype,
                                                   order=order,
                                                   location=location,
                                                   lock=fs_lock,
                                                   emulate_shm=False)

    if not retain:
        if use_shared_memory:
            shm.unlink(location)
        else:
            os.unlink(location)

    return mmap_handle, renderbuffer
Example #4
def multiprocess_download(
    requested_bbox, mip, cloudpaths,
    meta, cache, lru, compress_cache,
    fill_missing, progress,
    parallel, location, 
    retain, use_shared_memory, order,
    green, secrets=None, background_color=0,
  ):
  cpd = partial(child_process_download, 
    meta, cache, 
    mip, compress_cache, requested_bbox, 
    fill_missing, progress,
    location, use_shared_memory,
    green, secrets, background_color
  )

  if lru.size > 0:
    for path in cloudpaths:
      lru.pop(path, None)

  parallel_execution(
    cpd, cloudpaths, parallel, 
    progress=progress, 
    desc="Download",
    cleanup_shm=location,
    block_size=750,
  )

  shape = list(requested_bbox.size3()) + [ meta.num_channels ]

  if use_shared_memory:
    mmap_handle, renderbuffer = shm.ndarray(
      shape, dtype=meta.dtype, order=order, 
      location=location, lock=fs_lock
    )
  else:
    mmap_handle, renderbuffer = shm.ndarray_fs(
      shape, dtype=meta.dtype, order=order,
      location=location, lock=fs_lock,
      emulate_shm=False
    )

  if meta.encoding(mip) == "raw":
    repopulate_lru_from_shm(meta, mip, lru, renderbuffer, requested_bbox)

  if not retain:
    if use_shared_memory:
      shm.unlink(location)
    else:
      os.unlink(location)

  return mmap_handle, renderbuffer
Example #5
def child_process_download(
    meta, cache, 
    mip, compress_cache, dest_bbox, 
    fill_missing, progress,
    location, use_shared_memory, green,
    secrets, background_color, cloudpaths
  ):
  reset_connection_pools() # otherwise multi-process hangs

  shape = list(dest_bbox.size3()) + [ meta.num_channels ]

  if use_shared_memory:
    array_like, dest_img = shm.ndarray(
      shape, dtype=meta.dtype, 
      location=location, lock=fs_lock
    )
  else:
    array_like, dest_img = shm.ndarray_fs(
      shape, dtype=meta.dtype, 
      location=location, emulate_shm=False, 
      lock=fs_lock
    )

  if background_color != 0:
    dest_img[dest_bbox.to_slices()] = background_color

  def process(src_img, src_bbox):
    shade(dest_img, dest_bbox, src_img, src_bbox)
    if progress:
      # This is not good programming practice, but
      # I could not find a clean way to do this that
      # did not result in warnings about leaked semaphores.
      # progress_queue is created in common.py:initialize_progress_queue
      # as a global for this module.
      progress_queue.put(1)

  download_chunks_threaded(
    meta, cache, None, mip, cloudpaths,
    fn=process, decode_fn=decode, fill_missing=fill_missing,
    progress=False, compress_cache=compress_cache,
    green=green, secrets=secrets, background_color=background_color
  )

  array_like.close()

  return len(cloudpaths)
Example #6
def download(requested_bbox,
             mip,
             meta,
             cache,
             fill_missing,
             progress,
             parallel,
             location,
             retain,
             use_shared_memory,
             use_file,
             compress,
             order='F',
             green=False,
             secrets=None,
             renumber=False,
             background_color=0):
    """Cutout a requested bounding box from storage and return it as a numpy array."""

    full_bbox = requested_bbox.expand_to_chunk_size(
        meta.chunk_size(mip), offset=meta.voxel_offset(mip))
    full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
    cloudpaths = list(
        chunknames(full_bbox,
                   meta.bounds(mip),
                   meta.key(mip),
                   meta.chunk_size(mip),
                   protocol=meta.path.protocol))
    shape = list(requested_bbox.size3()) + [meta.num_channels]

    compress_cache = should_compress(meta.encoding(mip),
                                     compress,
                                     cache,
                                     iscache=True)

    handle = None

    if renumber and (parallel != 1):
        raise ValueError("renumber is not supported for parallel operation.")

    if use_shared_memory and use_file:
        raise ValueError(
            "use_shared_memory and use_file are mutually exclusive arguments.")

    dtype = np.uint16 if renumber else meta.dtype

    if parallel == 1:
        if use_shared_memory:  # write to shared memory
            handle, renderbuffer = shm.ndarray(shape,
                                               dtype=dtype,
                                               order=order,
                                               location=location,
                                               lock=fs_lock)
            if not retain:
                shm.unlink(location)
        elif use_file:  # write to ordinary file
            handle, renderbuffer = shm.ndarray_fs(shape,
                                                  dtype=dtype,
                                                  order=order,
                                                  location=location,
                                                  lock=fs_lock,
                                                  emulate_shm=False)
            if not retain:
                os.unlink(location)
        else:
            renderbuffer = np.full(shape=shape,
                                   fill_value=background_color,
                                   dtype=dtype,
                                   order=order)

        def process(img3d, bbox):
            shade(renderbuffer, requested_bbox, img3d, bbox)

        remap = {background_color: background_color}
        lock = threading.Lock()
        N = 1

        def process_renumber(img3d, bbox):
            nonlocal N
            nonlocal lock
            nonlocal remap
            nonlocal renderbuffer
            img_labels = fastremap.unique(img3d)
            with lock:
                for lbl in img_labels:
                    if lbl not in remap:
                        remap[lbl] = N
                        N += 1
                if N > np.iinfo(renderbuffer.dtype).max:
                    renderbuffer = fastremap.refit(renderbuffer,
                                                   value=N,
                                                   increase_only=True)

            fastremap.remap(img3d, remap, in_place=True)
            shade(renderbuffer, requested_bbox, img3d, bbox)

        fn = process
        if renumber and not (use_file or use_shared_memory):
            fn = process_renumber

        download_chunks_threaded(meta,
                                 cache,
                                 mip,
                                 cloudpaths,
                                 fn=fn,
                                 fill_missing=fill_missing,
                                 progress=progress,
                                 compress_cache=compress_cache,
                                 green=green,
                                 secrets=secrets,
                                 background_color=background_color)
    else:
        handle, renderbuffer = multiprocess_download(
            requested_bbox,
            mip,
            cloudpaths,
            meta,
            cache,
            compress_cache,
            fill_missing,
            progress,
            parallel,
            location,
            retain,
            use_shared_memory=(use_file == False),
            order=order,
            green=green,
            secrets=secrets,
            background_color=background_color)

    out = VolumeCutout.from_volume(meta,
                                   mip,
                                   renderbuffer,
                                   requested_bbox,
                                   handle=handle)
    if renumber:
        return (out, remap)
    return out
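
The process_renumber callback in Example #6 compacts arbitrary label IDs into a dense 0..N range as chunks arrive, and widens the output dtype if N overflows it. Below is a standalone sketch of that renumbering step using the same fastremap calls the example relies on (unique, refit, remap); the sample labels are made up.

import numpy as np
import fastremap

# A chunk that arrived with sparse 64-bit label IDs (values made up).
chunk = np.array([[5_000_000_000, 17], [17, 0]], dtype=np.uint64)

remap = {0: 0}                     # background maps to itself
renderbuffer = np.zeros((2, 2), dtype=np.uint16)
N = 1

for lbl in fastremap.unique(chunk):
    lbl = int(lbl)
    if lbl not in remap:
        remap[lbl] = N             # assign the next dense ID
        N += 1

# Widen the output buffer if the dense range no longer fits its dtype.
if N > np.iinfo(renderbuffer.dtype).max:
    renderbuffer = fastremap.refit(renderbuffer, value=N, increase_only=True)

renderbuffer[:] = fastremap.remap(chunk, remap, in_place=True)
print(renderbuffer)                # labels compacted to the dense range 0..2
print(remap)                       # {0: 0, 17: 1, 5000000000: 2}
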
Example #7
def download(requested_bbox,
             mip,
             meta,
             cache,
             fill_missing,
             progress,
             parallel,
             location,
             retain,
             use_shared_memory,
             use_file,
             compress,
             order='F',
             green=False):
    """Cutout a requested bounding box from storage and return it as a numpy array."""

    full_bbox = requested_bbox.expand_to_chunk_size(
        meta.chunk_size(mip), offset=meta.voxel_offset(mip))
    full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
    cloudpaths = list(
        chunknames(full_bbox,
                   meta.bounds(mip),
                   meta.key(mip),
                   meta.chunk_size(mip),
                   protocol=meta.path.protocol))
    shape = list(requested_bbox.size3()) + [meta.num_channels]

    compress_cache = should_compress(meta.encoding(mip),
                                     compress,
                                     cache,
                                     iscache=True)

    handle = None

    if use_shared_memory and use_file:
        raise ValueError(
            "use_shared_memory and use_file are mutually exclusive arguments.")

    if parallel == 1:
        if use_shared_memory:  # write to shared memory
            handle, renderbuffer = shm.ndarray(shape,
                                               dtype=meta.dtype,
                                               order=order,
                                               location=location,
                                               lock=fs_lock)
            if not retain:
                shm.unlink(location)
        elif use_file:  # write to ordinary file
            handle, renderbuffer = shm.ndarray_fs(shape,
                                                  dtype=meta.dtype,
                                                  order=order,
                                                  location=location,
                                                  lock=fs_lock,
                                                  emulate_shm=False)
            if not retain:
                os.unlink(location)
        else:
            renderbuffer = np.zeros(shape=shape, dtype=meta.dtype, order=order)

        def process(img3d, bbox):
            shade(renderbuffer, requested_bbox, img3d, bbox)

        download_chunks_threaded(meta,
                                 cache,
                                 mip,
                                 cloudpaths,
                                 fn=process,
                                 fill_missing=fill_missing,
                                 progress=progress,
                                 compress_cache=compress_cache,
                                 green=green)
    else:
        handle, renderbuffer = multiprocess_download(
            requested_bbox,
            mip,
            cloudpaths,
            meta,
            cache,
            compress_cache,
            fill_missing,
            progress,
            parallel,
            location,
            retain,
            use_shared_memory=(use_file == False),
            order=order,
            green=green,
        )

    return VolumeCutout.from_volume(meta,
                                    mip,
                                    renderbuffer,
                                    requested_bbox,
                                    handle=handle)
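
Both download variants begin by growing the requested cutout to the underlying chunk grid (expand_to_chunk_size) and clamping it to the dataset bounds before enumerating chunk filenames. The arithmetic behind that expansion is just flooring and ceiling to multiples of the chunk size relative to the voxel offset; below is a plain-numpy sketch of the same idea, independent of CloudVolume's Bbox class, where expand_to_chunk_grid is a hypothetical helper name.

import numpy as np

def expand_to_chunk_grid(minpt, maxpt, chunk_size, offset):
    # Hypothetical helper: grow [minpt, maxpt) outward so both corners
    # land on chunk boundaries measured from the dataset's voxel offset.
    minpt, maxpt = np.asarray(minpt), np.asarray(maxpt)
    chunk_size, offset = np.asarray(chunk_size), np.asarray(offset)
    lo = (minpt - offset) // chunk_size * chunk_size + offset
    hi = -((offset - maxpt) // chunk_size) * chunk_size + offset  # ceiling division
    return lo, hi

# A 10..50 request on a 64-voxel chunk grid with offset 0 expands to 0..64 per axis.
lo, hi = expand_to_chunk_grid((10, 10, 10), (50, 50, 50), (64, 64, 64), (0, 0, 0))
print(lo, hi)  # [0 0 0] [64 64 64]
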