Code example #1
File: __init__.py Project: perlman/cloud-volume
def to_volumecutout(img,
                    image_type,
                    resolution=None,
                    offset=None,
                    hostname='localhost'):
    from cloudvolume.volumecutout import VolumeCutout
    if type(img) == VolumeCutout:
        try:
            img.dataset_name  # check if it's an intact VolumeCutout
            return img
        except AttributeError:
            pass

    resolution = getresolution(img, resolution)
    offset = getoffset(img, offset)

    return VolumeCutout(
        buf=img,
        path=ExtractedPath('mem', hostname, '/', '', '', '', ''),
        cloudpath='IN MEMORY',
        resolution=resolution,
        mip=-1,
        layer_type=image_type,
        bounds=Bbox(offset, offset + Vec(*(img.shape[:3]))),
        handle=None,
    )
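
A hedged usage sketch for the helper above (not part of the project): it wraps a plain numpy array as a VolumeCutout, while an already intact VolumeCutout passes through unchanged. The exact defaults that getresolution/getoffset substitute for None are an assumption here.

import numpy as np

# Hypothetical usage; assumes to_volumecutout and the helpers it calls
# (getresolution, getoffset, VolumeCutout, ExtractedPath, Bbox, Vec) are
# importable from the module shown above.
img = np.zeros((64, 64, 64), dtype=np.uint8)
cutout = to_volumecutout(img, 'image')  # image_type='image'
print(type(cutout).__name__, cutout.shape)  # expected: a VolumeCutout shaped like img
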
Code example #2
File: __init__.py Project: stjordanis/cloud-volume
def view(img,
         segmentation=False,
         resolution=None,
         offset=None,
         hostname="localhost",
         port=DEFAULT_PORT):
    from cloudvolume.volumecutout import VolumeCutout

    img = to3d(img)
    resolution = getresolution(img, resolution)
    offset = getoffset(img, offset)

    # Converting to float64 makes sense for image viewing, but not for
    # segmentation, which currently requires unsigned integers. (Jan. 2019)
    if np.dtype(img.dtype).itemsize == 8 and not np.issubdtype(
            img.dtype, np.float64):
        print(
            yellow("""
Converting {} to float64 for display. 
Javascript does not support native 64-bit integer arrays.
      """.format(img.dtype)))
        img = img.astype(np.float64)

    cutout = VolumeCutout(
        buf=img,
        path=ExtractedPath('mem', hostname, '/', '', '', '', ''),
        cloudpath='IN MEMORY',
        resolution=resolution,
        mip=-1,
        layer_type=('segmentation' if segmentation else 'image'),
        bounds=Bbox(offset, offset + Vec(*(img.shape[:3]))),
        handle=None,
    )
    return run([cutout], hostname=hostname, port=port)
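
Since view() is defined in cloud-volume's __init__.py, it can be called directly on an in-memory array. A hedged usage sketch follows; the value of DEFAULT_PORT and the blocking behavior of run([...]) are assumptions not shown above.

import numpy as np
from cloudvolume import view  # package-level function defined in __init__.py above

labels = np.random.randint(0, 5, size=(128, 128, 64), dtype=np.uint32)
# Serves the array to a browser-based viewer on localhost:DEFAULT_PORT;
# assumed to block until interrupted.
view(labels, segmentation=True, resolution=(8, 8, 40))
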
Code example #3
def download_sharded(
    requested_bbox, mip,
    meta, cache, spec,
    compress, progress,
    fill_missing, 
    order
  ):

  full_bbox = requested_bbox.expand_to_chunk_size(
    meta.chunk_size(mip), offset=meta.voxel_offset(mip)
  )
  full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
  shape = list(requested_bbox.size3()) + [ meta.num_channels ]
  compress_cache = should_compress(meta.encoding(mip), compress, cache, iscache=True)

  chunk_size = meta.chunk_size(mip)
  grid_size = np.ceil(meta.bounds(mip).size3() / chunk_size).astype(np.uint32)

  reader = sharding.ShardReader(meta, cache, spec)
  bounds = meta.bounds(mip)

  renderbuffer = np.zeros(shape=shape, dtype=meta.dtype, order=order)

  gpts = list(gridpoints(full_bbox, bounds, chunk_size))

  code_map = {}
  morton_codes = compressed_morton_code(gpts, grid_size)
  for gridpoint, morton_code in zip(gpts, morton_codes):
    cutout_bbox = Bbox(
      bounds.minpt + gridpoint * chunk_size,
      min2(bounds.minpt + (gridpoint + 1) * chunk_size, bounds.maxpt)
    )
    code_map[morton_code] = cutout_bbox

  all_chunkdata = reader.get_data(list(code_map.keys()), meta.key(mip), progress=progress)
  for zcode, chunkdata in all_chunkdata.items():
    cutout_bbox = code_map[zcode]
    if chunkdata is None:
      if fill_missing:
        chunkdata = None  # leave empty; decode() renders a blank chunk for missing data
      else:
        raise EmptyVolumeException(cutout_bbox)

    img3d = decode(
      meta, cutout_bbox, 
      chunkdata, fill_missing, mip
    )

    shade(renderbuffer, requested_bbox, img3d, cutout_bbox)

  return VolumeCutout.from_volume(
    meta, mip, renderbuffer, 
    requested_bbox
  )
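
The Morton code step above is what turns each 3D grid point into a single integer key inside the shard. A minimal, hypothetical sketch of that "compressed" Morton (Z-order) encoding for one grid point is shown below; the compressed_morton_code used above is the library's vectorized equivalent, so treat this only as an illustration of the indexing scheme.

def morton_code_sketch(gridpt, grid_size):
    # Interleave the bits of x, y, z, except that an axis stops contributing
    # once ceil(log2(grid_size[d])) bits have been emitted for it, so small
    # grid dimensions do not pad the code with zero bits.
    bits = [(int(g) - 1).bit_length() for g in grid_size]  # bits needed per axis
    code, out_bit = 0, 0
    for i in range(max(bits)):
        for d in range(len(grid_size)):
            if i < bits[d]:
                code |= ((int(gridpt[d]) >> i) & 1) << out_bit
                out_bit += 1
    return code

# e.g. morton_code_sketch((3, 1, 0), (4, 2, 1)) == 0b111
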
Code example #4
File: __init__.py Project: ZettaAI/cloud-volume
def view(
    img, segmentation=False, resolution=None, offset=None,
    hostname="localhost", port=DEFAULT_PORT
  ):
  from cloudvolume.volumecutout import VolumeCutout

  img = to3d(img)
  resolution = getresolution(img, resolution)
  offset = getoffset(img, offset)

  cutout = VolumeCutout(
    buf=img,
    path=ExtractedPath('mem', hostname, '/', '', '', '', ''),
    cloudpath='IN MEMORY',
    resolution=resolution,
    mip=-1,
    layer_type=('segmentation' if segmentation else 'image'),
    bounds=Bbox( offset, offset + Vec(*(img.shape[:3])) ),
    handle=None,
  )
  return run([ cutout ], hostname=hostname, port=port)
Code example #5
def download_sharded(requested_bbox, mip, meta, cache, spec, compress,
                     progress, fill_missing, order):

    full_bbox = requested_bbox.expand_to_chunk_size(
        meta.chunk_size(mip), offset=meta.voxel_offset(mip))
    full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
    shape = list(requested_bbox.size3()) + [meta.num_channels]
    compress_cache = should_compress(meta.encoding(mip),
                                     compress,
                                     cache,
                                     iscache=True)

    chunk_size = meta.chunk_size(mip)
    grid_size = np.ceil(meta.bounds(mip).size3() / chunk_size).astype(
        np.uint32)

    reader = sharding.ShardReader(meta, cache, spec)
    bounds = meta.bounds(mip)

    renderbuffer = np.zeros(shape=shape, dtype=meta.dtype, order=order)

    gpts = tqdm(list(gridpoints(full_bbox, bounds, chunk_size)),
                disable=(not progress),
                desc='Downloading')

    for gridpoint in gpts:
        zcurve_code = compressed_morton_code(gridpoint, grid_size)
        chunkdata = reader.get_data(zcurve_code, meta.key(mip))

        cutout_bbox = Bbox(
            bounds.minpt + gridpoint * chunk_size,
            min2(bounds.minpt + (gridpoint + 1) * chunk_size, bounds.maxpt))

        img3d = decode(meta, cutout_bbox, chunkdata, fill_missing, mip)

        shade(renderbuffer, requested_bbox, img3d, cutout_bbox)

    return VolumeCutout.from_volume(meta, mip, renderbuffer, requested_bbox)
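
In these download functions, shade(...) is the step that pastes each decoded chunk into the output buffer at the right place. A hypothetical sketch of that copy, assuming Bbox exposes integer minpt/maxpt vectors (the real function may handle more cases, such as dtype checks):

import numpy as np

def shade_sketch(renderbuffer, requested_bbox, img3d, chunk_bbox):
    # Copy the part of the chunk that overlaps the requested bounding box into
    # the corresponding region of the (x, y, z, channel) render buffer.
    isect_min = np.maximum(requested_bbox.minpt, chunk_bbox.minpt)
    isect_max = np.minimum(requested_bbox.maxpt, chunk_bbox.maxpt)
    if np.any(isect_max <= isect_min):
        return  # no overlap, nothing to copy
    dst = tuple(slice(lo - o, hi - o)
                for lo, hi, o in zip(isect_min, isect_max, requested_bbox.minpt))
    src = tuple(slice(lo - o, hi - o)
                for lo, hi, o in zip(isect_min, isect_max, chunk_bbox.minpt))
    renderbuffer[dst] = img3d[src]
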
Code example #6
File: rx.py Project: jhuapl-boss/cloud-volume
def download(requested_bbox,
             mip,
             meta,
             cache,
             fill_missing,
             progress,
             parallel,
             location,
             retain,
             use_shared_memory,
             use_file,
             compress,
             order='F',
             green=False,
             secrets=None,
             renumber=False,
             background_color=0):
    """Cutout a requested bounding box from storage and return it as a numpy array."""

    full_bbox = requested_bbox.expand_to_chunk_size(
        meta.chunk_size(mip), offset=meta.voxel_offset(mip))
    full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
    cloudpaths = list(
        chunknames(full_bbox,
                   meta.bounds(mip),
                   meta.key(mip),
                   meta.chunk_size(mip),
                   protocol=meta.path.protocol))
    shape = list(requested_bbox.size3()) + [meta.num_channels]

    compress_cache = should_compress(meta.encoding(mip),
                                     compress,
                                     cache,
                                     iscache=True)

    handle = None

    if renumber and (parallel != 1):
        raise ValueError("renumber is not supported for parallel operation.")

    if use_shared_memory and use_file:
        raise ValueError(
            "use_shared_memory and use_file are mutually exclusive arguments.")

    dtype = np.uint16 if renumber else meta.dtype

    if parallel == 1:
        if use_shared_memory:  # write to shared memory
            handle, renderbuffer = shm.ndarray(shape,
                                               dtype=dtype,
                                               order=order,
                                               location=location,
                                               lock=fs_lock)
            if not retain:
                shm.unlink(location)
        elif use_file:  # write to ordinary file
            handle, renderbuffer = shm.ndarray_fs(shape,
                                                  dtype=dtype,
                                                  order=order,
                                                  location=location,
                                                  lock=fs_lock,
                                                  emulate_shm=False)
            if not retain:
                os.unlink(location)
        else:
            renderbuffer = np.full(shape=shape,
                                   fill_value=background_color,
                                   dtype=dtype,
                                   order=order)

        def process(img3d, bbox):
            shade(renderbuffer, requested_bbox, img3d, bbox)

        remap = {background_color: background_color}
        lock = threading.Lock()
        N = 1

        def process_renumber(img3d, bbox):
            nonlocal N
            nonlocal lock
            nonlocal remap
            nonlocal renderbuffer
            img_labels = fastremap.unique(img3d)
            with lock:
                for lbl in img_labels:
                    if lbl not in remap:
                        remap[lbl] = N
                        N += 1
                if N > np.iinfo(renderbuffer.dtype).max:
                    renderbuffer = fastremap.refit(renderbuffer,
                                                   value=N,
                                                   increase_only=True)

            fastremap.remap(img3d, remap, in_place=True)
            shade(renderbuffer, requested_bbox, img3d, bbox)

        fn = process
        if renumber and not (use_file or use_shared_memory):
            fn = process_renumber

        download_chunks_threaded(meta,
                                 cache,
                                 mip,
                                 cloudpaths,
                                 fn=fn,
                                 fill_missing=fill_missing,
                                 progress=progress,
                                 compress_cache=compress_cache,
                                 green=green,
                                 secrets=secrets,
                                 background_color=background_color)
    else:
        handle, renderbuffer = multiprocess_download(
            requested_bbox,
            mip,
            cloudpaths,
            meta,
            cache,
            compress_cache,
            fill_missing,
            progress,
            parallel,
            location,
            retain,
            use_shared_memory=(use_file == False),
            order=order,
            green=green,
            secrets=secrets,
            background_color=background_color)

    out = VolumeCutout.from_volume(meta,
                                   mip,
                                   renderbuffer,
                                   requested_bbox,
                                   handle=handle)
    if renumber:
        return (out, remap)
    return out
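
The renumber branch above compacts arbitrary (often 64-bit) segment ids into small consecutive integers so the render buffer can start out as uint16 and only be refit upward when the id count overflows that range. A hypothetical single-threaded sketch of the idea (the real code uses fastremap plus a lock for thread safety):

import numpy as np

def renumber_chunk_sketch(chunk, remap, next_id):
    # Assign a small consecutive id to each label not seen before, then
    # relabel the chunk through the shared `remap` table. The real path also
    # refits the buffer dtype once next_id exceeds the uint16 range.
    for lbl in np.unique(chunk):
        if lbl not in remap:
            remap[lbl] = next_id
            next_id += 1
    lookup = np.vectorize(remap.__getitem__, otypes=[np.uint16])
    return lookup(chunk), next_id

# Usage, mirroring the defaults above: remap = {background_color: background_color},
# next_id = 1.
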
Code example #7
def download(requested_bbox,
             mip,
             meta,
             cache,
             fill_missing,
             progress,
             parallel,
             location,
             retain,
             use_shared_memory,
             use_file,
             compress,
             order='F',
             green=False):
    """Cutout a requested bounding box from storage and return it as a numpy array."""

    full_bbox = requested_bbox.expand_to_chunk_size(
        meta.chunk_size(mip), offset=meta.voxel_offset(mip))
    full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
    cloudpaths = list(
        chunknames(full_bbox,
                   meta.bounds(mip),
                   meta.key(mip),
                   meta.chunk_size(mip),
                   protocol=meta.path.protocol))
    shape = list(requested_bbox.size3()) + [meta.num_channels]

    compress_cache = should_compress(meta.encoding(mip),
                                     compress,
                                     cache,
                                     iscache=True)

    handle = None

    if use_shared_memory and use_file:
        raise ValueError(
            "use_shared_memory and use_file are mutually exclusive arguments.")

    if parallel == 1:
        if use_shared_memory:  # write to shared memory
            handle, renderbuffer = shm.ndarray(shape,
                                               dtype=meta.dtype,
                                               order=order,
                                               location=location,
                                               lock=fs_lock)
            if not retain:
                shm.unlink(location)
        elif use_file:  # write to ordinary file
            handle, renderbuffer = shm.ndarray_fs(shape,
                                                  dtype=meta.dtype,
                                                  order=order,
                                                  location=location,
                                                  lock=fs_lock,
                                                  emulate_shm=False)
            if not retain:
                os.unlink(location)
        else:
            renderbuffer = np.zeros(shape=shape, dtype=meta.dtype, order=order)

        def process(img3d, bbox):
            shade(renderbuffer, requested_bbox, img3d, bbox)

        download_chunks_threaded(meta,
                                 cache,
                                 mip,
                                 cloudpaths,
                                 fn=process,
                                 fill_missing=fill_missing,
                                 progress=progress,
                                 compress_cache=compress_cache,
                                 green=green)
    else:
        handle, renderbuffer = multiprocess_download(
            requested_bbox,
            mip,
            cloudpaths,
            meta,
            cache,
            compress_cache,
            fill_missing,
            progress,
            parallel,
            location,
            retain,
            use_shared_memory=(use_file == False),
            order=order,
            green=green,
        )

    return VolumeCutout.from_volume(meta,
                                    mip,
                                    renderbuffer,
                                    requested_bbox,
                                    handle=handle)
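
Both download variants can place the render buffer in shared memory (shm.ndarray) or in a file-backed buffer (shm.ndarray_fs) so other processes can attach to it by the name given in `location`. cloud-volume's shm module is not shown here; the following is a generic stand-in built on the standard library, intended only to illustrate the idea:

import numpy as np
from multiprocessing import shared_memory

def shared_ndarray_sketch(shape, dtype, location):
    # Back a numpy array with a named shared-memory segment so another process
    # (or a later call, when the segment is retained) can reattach by name.
    nbytes = int(np.prod(shape)) * np.dtype(dtype).itemsize
    handle = shared_memory.SharedMemory(name=location, create=True, size=nbytes)
    buf = np.ndarray(shape, dtype=dtype, buffer=handle.buf)
    return handle, buf

# handle.close() / handle.unlink() play roughly the role of the retain /
# shm.unlink logic above; this is an analogy, not cloud-volume's implementation.
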
Code example #8
File: rx.py Project: ZettaAI/cloud-volume
def download_sharded(
  requested_bbox, mip,
  meta, cache, lru, spec,
  compress, progress,
  fill_missing, 
  order, background_color
):

  full_bbox = requested_bbox.expand_to_chunk_size(
    meta.chunk_size(mip), offset=meta.voxel_offset(mip)
  )
  full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
  shape = list(requested_bbox.size3()) + [ meta.num_channels ]
  compress_cache = should_compress(meta.encoding(mip), compress, cache, iscache=True)

  chunk_size = meta.chunk_size(mip)
  grid_size = np.ceil(meta.bounds(mip).size3() / chunk_size).astype(np.uint32)

  reader = sharding.ShardReader(meta, cache, spec)
  bounds = meta.bounds(mip)

  renderbuffer = np.zeros(shape=shape, dtype=meta.dtype, order=order)

  gpts = list(gridpoints(full_bbox, bounds, chunk_size))

  code_map = {}
  morton_codes = compressed_morton_code(gpts, grid_size)
  for gridpoint, morton_code in zip(gpts, morton_codes):
    cutout_bbox = Bbox(
      bounds.minpt + gridpoint * chunk_size,
      min2(bounds.minpt + (gridpoint + 1) * chunk_size, bounds.maxpt)
    )
    code_map[morton_code] = cutout_bbox

  single_voxel = requested_bbox.volume() == 1

  decode_fn = decode
  if single_voxel:
    decode_fn = partial(decode_single_voxel, requested_bbox.minpt - full_bbox.minpt)

  all_keys = set(code_map.keys())
  lru_keys = set([ key for key in all_keys if key in lru ])
  io_keys = all_keys - lru_keys
  del all_keys

  lru_chunkdata = [ (zcode, lru[zcode]) for zcode in lru_keys ]
  io_chunkdata = reader.get_data(io_keys, meta.key(mip), progress=progress)
  
  for zcode, chunkdata in io_chunkdata.items():
    lru[zcode] = chunkdata

  for zcode, chunkdata in itertools.chain(io_chunkdata.items(), lru_chunkdata):
    cutout_bbox = code_map[zcode]
    img3d = decode_fn(
      meta, cutout_bbox, 
      chunkdata, fill_missing, mip,
      background_color=background_color
    )
    
    if single_voxel:
      renderbuffer[:] = img3d
    else:
      shade(renderbuffer, requested_bbox, img3d, cutout_bbox)

  return VolumeCutout.from_volume(
    meta, mip, renderbuffer, 
    requested_bbox
  )
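
The ZettaAI variant adds an in-process LRU cache in front of shard reads: codes already present in `lru` skip the ShardReader entirely, and freshly fetched chunks are written back into it. Only the mapping interface is implied above (key in lru, lru[key], lru[key] = value); a minimal hypothetical stand-in with that interface:

from collections import OrderedDict

class LRUSketch:
    # Minimal mapping with least-recently-used eviction; the real object passed
    # in as `lru` may differ (byte-size accounting, statistics, etc.).
    def __init__(self, maxsize=128):
        self.maxsize = maxsize
        self._data = OrderedDict()

    def __contains__(self, key):
        return key in self._data

    def __getitem__(self, key):
        self._data.move_to_end(key)  # mark as most recently used
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value
        self._data.move_to_end(key)
        if len(self._data) > self.maxsize:
            self._data.popitem(last=False)  # evict the least recently used entry
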