Example #1
def create_watershed_remap_tasks(
    map_path, src_layer_path, dest_layer_path, 
    shape=Vec(2048, 2048, 64)
  ):
  shape = Vec(*shape)
  vol = CloudVolume(src_layer_path)

  create_downsample_scales(dest_layer_path, mip=0, ds_shape=shape)

  class WatershedRemapTaskIterator(FinelyDividedTaskIterator):
    def task(self, shape, offset):
      return WatershedRemapTask(
        map_path=map_path,
        src_path=src_layer_path,
        dest_path=dest_layer_path,
        shape=shape.clone(),
        offset=offset.clone(),
      )
    
    def on_finish(self):
      dvol = CloudVolume(dest_layer_path)
      dvol.provenance.processing.append({
        'method': {
          'task': 'WatershedRemapTask',
          'src': src_layer_path,
          'dest': dest_layer_path,
          'remap_file': map_path,
          'shape': list(shape),
        },
        'by': OPERATOR_CONTACT,
        'date': strftime('%Y-%m-%d %H:%M %Z'),
      }) 
      dvol.commit_provenance()

  return WatershedRemapTaskIterator(vol.bounds, shape)
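For context, the iterator returned above is meant to be handed to a task queue. A minimal usage sketch, assuming the companion taskqueue package (`LocalTaskQueue`) and hypothetical cloud paths:

from taskqueue import LocalTaskQueue

# all paths below are hypothetical placeholders
tq = LocalTaskQueue(parallel=4)
tasks = create_watershed_remap_tasks(
  'gs://bucket/remap.npy',  # map_path
  'gs://bucket/ws',         # src_layer_path
  'gs://bucket/seg',        # dest_layer_path
)
tq.insert(tasks)  # exhausting the iterator also triggers on_finish()
tq.execute()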
Example #2
def create_meshing_tasks(task_queue, layer_path, mip, shape=Vec(512, 512,
                                                                512)):
    shape = Vec(*shape)
    max_simplification_error = 40

    vol = CloudVolume(layer_path, mip)

    if 'mesh' not in vol.info:
        vol.info['mesh'] = 'mesh_mip_{}_err_{}'.format(
            mip, max_simplification_error)
        vol.commit_info()

    for startpt in tqdm(xyzrange(vol.bounds.minpt, vol.bounds.maxpt, shape),
                        desc="Inserting Mesh Tasks"):
        task = MeshTask(
            layer_path=layer_path,
            mip=vol.mip,
            shape=shape.clone(),
            offset=startpt.clone(),
            max_simplification_error=max_simplification_error,
        )
        task_queue.insert(task)
    task_queue.wait('Uploading MeshTasks')

    vol.provenance.processing.append({
        'method': {
            'task': 'MeshTask',
            'layer_path': layer_path,
            'mip': vol.mip,
            'shape': shape.tolist(),
        },
        'by': USER_EMAIL,
        'date': strftime('%Y-%m-%d %H:%M %Z'),
    })
    vol.commit_provenance()
Example #3
 def __init__(self, map_path, src_path, dest_path, shape, offset):
     super(WatershedRemapTask, self).__init__(map_path, src_path, dest_path, shape, offset)
     self.map_path = map_path
     self.src_path = src_path
     self.dest_path = dest_path
     self.shape = Vec(*shape)
     self.offset = Vec(*offset)
Example #4
  def __init__(
    self, src_path, dest_path, levels_path, shape,
    offset, mip, clip_fraction, fill_missing,
    translate, minval, maxval
  ):

    super(ContrastNormalizationTask, self).__init__(
      src_path, dest_path, levels_path, shape, offset,
      mip, clip_fraction, fill_missing, translate,
      minval, maxval
    )
    self.src_path = src_path
    self.dest_path = dest_path
    self.shape = Vec(*shape)
    self.offset = Vec(*offset)
    self.fill_missing = fill_missing
    self.translate = Vec(*translate)
    self.mip = int(mip)
    if isinstance(clip_fraction, Sequence):
      assert len(clip_fraction) == 2
      self.lower_clip_fraction = float(clip_fraction[0])
      self.upper_clip_fraction = float(clip_fraction[1])
    else:
      self.lower_clip_fraction = self.upper_clip_fraction = float(clip_fraction)

    self.minval = minval
    self.maxval = maxval

    self.levels_path = levels_path if levels_path else self.src_path

    assert 0 <= self.lower_clip_fraction <= 1
    assert 0 <= self.upper_clip_fraction <= 1
    assert self.lower_clip_fraction + self.upper_clip_fraction <= 1
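Note the two accepted forms above: a scalar such as `clip_fraction=0.01` applies the same fraction to both the lower and upper clip bounds, while a pair such as `clip_fraction=(0.0, 0.05)` sets them independently.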
Example #5
def create_quantized_affinity_tasks(task_queue,
                                    src_layer,
                                    dest_layer,
                                    shape,
                                    fill_missing=False):
    shape = Vec(*shape)

    info = create_quantized_affinity_info(src_layer, dest_layer, shape)
    destvol = CloudVolume(dest_layer, info=info)
    destvol.commit_info()

    create_downsample_scales(dest_layer, mip=0, ds_shape=shape)

    for startpt in tqdm(xyzrange(destvol.bounds.minpt, destvol.bounds.maxpt,
                                 shape),
                        desc="Inserting QuantizeAffinities Tasks"):
        task = QuantizeAffinitiesTask(
            source_layer_path=src_layer,
            dest_layer_path=dest_layer,
            shape=list(shape.clone()),
            offset=list(startpt.clone()),
            fill_missing=fill_missing,
        )
        task_queue.insert(task)
    task_queue.wait('Uploading')
Example #6
    def _handle_dataset_boundary(self, data, bbox):
        """
    This logic is used to add a black border along sides
    of the image that touch the dataset boundary which
    results in the closure of the mesh faces on that side.
    """
        if ((not np.any(bbox.minpt == self._volume.bounds.minpt))
                and (not np.any(bbox.maxpt == self._volume.bounds.maxpt))):
            return data, Vec(0, 0, 0)

        shape = Vec(*data.shape, dtype=np.int64)
        offset = Vec(0, 0, 0, 0)
        for i in range(3):
            if bbox.minpt[i] == self._volume.voxel_offset[i]:
                offset[i] += 1
                shape[i] += 1
            if bbox.maxpt[i] == self._volume.bounds.maxpt[i]:
                shape[i] += 1

        slices = (
            slice(offset.x, offset.x + data.shape[0]),
            slice(offset.y, offset.y + data.shape[1]),
            slice(offset.z, offset.z + data.shape[2]),
        )

        mirror_data = np.zeros(shape, dtype=data.dtype, order="F")
        mirror_data[slices] = data
        if offset[0]:
            mirror_data[0, :, :] = 0
        if offset[1]:
            mirror_data[:, 0, :] = 0
        if offset[2]:
            mirror_data[:, :, 0] = 0

        return mirror_data, offset[:3]
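To make the padding concrete, here is a standalone numpy sketch (not part of the class above) of the same idea, where a label touching the -x dataset boundary gets one plane of zeros so marching cubes can close the mesh on that side:

import numpy as np

data = np.ones((4, 4, 4), dtype=np.uint8)        # label touches every face
mirror = np.zeros((5, 4, 4), dtype=np.uint8, order="F")
mirror[1:, :, :] = data                          # offset of (1, 0, 0), as above
mirror[0, :, :] = 0                              # black border on the -x side
# meshing `mirror` instead of `data` yields closed faces at x == 0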
Example #7
  def __init__(
    self, src_path, dest_path, levels_path, shape, 
    offset, mip, clip_fraction, fill_missing, 
    translate, minval, maxval
  ):

    super(ContrastNormalizationTask, self).__init__(
      src_path, dest_path, levels_path, shape, offset, 
      mip, clip_fraction, fill_missing, translate,
      minval, maxval
    )
    self.src_path = src_path
    self.dest_path = dest_path
    self.shape = Vec(*shape)
    self.offset = Vec(*offset)
    self.fill_missing = fill_missing
    self.translate = Vec(*translate)
    self.mip = int(mip)
    self.clip_fraction = float(clip_fraction)
    self.minval = minval 
    self.maxval = maxval

    self.levels_path = levels_path if levels_path else self.src_path

    assert 0 <= self.clip_fraction <= 1
Example #8
def create_watershed_remap_tasks(task_queue,
                                 map_path,
                                 src_layer_path,
                                 dest_layer_path,
                                 shape=Vec(2048, 2048, 64)):
    shape = Vec(*shape)
    vol = CloudVolume(src_layer_path)

    create_downsample_scales(dest_layer_path, mip=0, ds_shape=shape)

    for startpt in tqdm(xyzrange(vol.bounds.minpt, vol.bounds.maxpt, shape),
                        desc="Inserting Remap Tasks"):
        task = WatershedRemapTask(
            map_path=map_path,
            src_path=src_layer_path,
            dest_path=dest_layer_path,
            shape=shape.clone(),
            offset=startpt.clone(),
        )
        task_queue.insert(task)
    task_queue.wait('Uploading Remap Tasks')
    dvol = CloudVolume(dest_layer_path)
    dvol.provenance.processing.append({
        'method': {
            'task': 'WatershedRemapTask',
            'src': src_layer_path,
            'dest': dest_layer_path,
            'remap_file': map_path,
            'shape': list(shape),
        },
        'by': '*****@*****.**',
        'date': strftime('%Y-%m-%d %H:%M %Z'),
    })
    dvol.commit_provenance()
Example #9
def generate_chunks(meta, img, offset, mip):
    shape = Vec(*img.shape)[:3]
    offset = Vec(*offset)[:3]

    bounds = Bbox(offset, shape + offset)

    alignment_check = bounds.round_to_chunk_size(meta.chunk_size(mip),
                                                 meta.voxel_offset(mip))

    if not np.all(alignment_check.minpt == bounds.minpt):
        raise AlignmentError("""
      Only chunk aligned writes are supported by this function. 

      Got:             {}
      Volume Offset:   {} 
      Nearest Aligned: {}
    """.format(bounds, meta.voxel_offset(mip), alignment_check))

    bounds = Bbox.clamp(bounds, meta.bounds(mip))

    img_offset = bounds.minpt - offset
    img_end = Vec.clamp(bounds.size3() + img_offset, Vec(0, 0, 0), shape)

    for startpt in xyzrange(img_offset, img_end, meta.chunk_size(mip)):
        startpt = startpt.clone()
        endpt = min2(startpt + meta.chunk_size(mip), shape)
        spt = (startpt + bounds.minpt).astype(int)
        ept = (endpt + bounds.minpt).astype(int)
        yield (startpt, endpt, spt, ept)
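Each yielded tuple pairs image-relative coordinates (startpt, endpt) with absolute voxel coordinates (spt, ept). A hedged consumption sketch (the storage-key line is illustrative, not taken from the source):

for startpt, endpt, spt, ept in generate_chunks(meta, img, offset, mip):
    chunk = img[startpt.x:endpt.x, startpt.y:endpt.y, startpt.z:endpt.z]
    key = Bbox(spt, ept).to_filename()  # e.g. "0-64_0-64_0-16"
    # ... encode `chunk` and upload it under `key`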
Example #10
 def __init__(self, image_layer_path, convnet_path, mask_layer_path, output_layer_path,
              output_offset, output_shape, patch_size, patch_overlap,
              cropping_margin_size, output_key='output', num_output_channels=3,
              image_mip=1, output_mip=1, mask_mip=3):
     
     super().__init__(image_layer_path, convnet_path, mask_layer_path, output_layer_path,
             output_offset, output_shape, patch_size, patch_overlap, 
             cropping_margin_size, output_key, num_output_channels, 
             image_mip, output_mip, mask_mip)
     
     output_shape = Vec(*output_shape)
     output_offset = Vec(*output_offset)
     self.image_layer_path = image_layer_path
     self.convnet_path = convnet_path
     self.mask_layer_path = mask_layer_path 
     self.output_layer_path = output_layer_path
     self.output_bounds = Bbox(output_offset, output_shape + output_offset)
     self.patch_size = patch_size
     self.patch_overlap = patch_overlap
     self.cropping_margin_size = cropping_margin_size
     self.output_key = output_key
     self.num_output_channels = num_output_channels
     self.image_mip = image_mip
     self.output_mip = output_mip
     self.mask_mip = mask_mip 
Example #11
 def __init__(self, layer_path, shape, offset, mip=0, num_mips=5):
   super(DeleteTask, self).__init__(layer_path, shape, offset, mip, num_mips)
   self.layer_path = layer_path
   self.shape = Vec(*shape)
   self.offset = Vec(*offset)
   self.mip = mip
   self.num_mips = num_mips
Example #12
def ImageShardTransferTask(
  src_path: str,
  dst_path: str,
  shape: ShapeType,
  offset: ShapeType,
  mip: int = 0,
  fill_missing: bool = False,
  translate: ShapeType = (0, 0, 0),
  agglomerate: bool = False,
  timestamp: Optional[int] = None,
):
  """
  Generates a sharded image volume from
  a preexisting CloudVolume readable data 
  source. Downsamples are not generated.

  The sharded specification can be read here:
  Shard Container: 
  https://github.com/google/neuroglancer/blob/056a3548abffc3c76c93c7a906f1603ce02b5fa3/src/neuroglancer/datasource/precomputed/sharded.md
  Sharded Images:    
  https://github.com/google/neuroglancer/blob/056a3548abffc3c76c93c7a906f1603ce02b5fa3/src/neuroglancer/datasource/precomputed/volume.md#unsharded-chunk-storage
  """
  shape = Vec(*shape)
  offset = Vec(*offset)
  mip = int(mip)
  fill_missing = bool(fill_missing)
  translate = Vec(*translate)

  src_vol = CloudVolume(
    src_path, fill_missing=fill_missing, 
    mip=mip, bounded=False
  )
  dst_vol = CloudVolume(
    dst_path,
    fill_missing=fill_missing,
    mip=mip,
    compress=None
  )

  dst_bbox = Bbox(offset, offset + shape)
  dst_bbox = Bbox.clamp(dst_bbox, dst_vol.meta.bounds(mip))
  dst_bbox = dst_bbox.expand_to_chunk_size(
    dst_vol.meta.chunk_size(mip), 
    offset=dst_vol.meta.voxel_offset(mip)
  )
  src_bbox = dst_bbox - translate

  img = src_vol.download(
    src_bbox, agglomerate=agglomerate, timestamp=timestamp
  )
  (filename, shard) = dst_vol.image.make_shard(
    img, dst_bbox, mip, progress=False
  )
  del img

  basepath = dst_vol.meta.join(
    dst_vol.cloudpath, dst_vol.meta.key(mip)
  )

  CloudFiles(basepath).put(filename, shard)
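Like the other function-style tasks in this set, it executes on call. A hedged invocation sketch (paths hypothetical; in practice shape and offset should line up with the destination's shard geometry, since make_shard emits a single shard file):

ImageShardTransferTask(
  'gs://bucket/unsharded_image',  # hypothetical src_path
  'gs://bucket/sharded_image',    # hypothetical dst_path
  shape=(2048, 2048, 64),
  offset=(0, 0, 0),
)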
Example #13
 def __init__(self,
              cloudpath,
              shape,
              offset,
              mip,
              teasar_params,
              will_postprocess,
              info=None,
              object_ids=None,
              mask_ids=None,
              fix_branching=True,
              fix_borders=True,
              fix_avocados=False,
              dust_threshold=1000,
              progress=False,
              parallel=1,
              fill_missing=False,
              sharded=False,
              spatial_index=True,
              spatial_grid_shape=None,
              synapses=None):
     super(SkeletonTask,
           self).__init__(cloudpath, shape, offset, mip, teasar_params,
                          will_postprocess, info, object_ids, mask_ids,
                          fix_branching, fix_borders, fix_avocados,
                          dust_threshold, progress, parallel, fill_missing,
                          bool(sharded), bool(spatial_index),
                          spatial_grid_shape, synapses)
     self.bounds = Bbox(offset, Vec(*shape) + Vec(*offset))
     self.index_bounds = Bbox(offset,
                              Vec(*spatial_grid_shape) + Vec(*offset))
Example #14
 def __init__(self, source_layer_path, dest_layer_path, shape, offset, fill_missing=False):
   super(QuantizeAffinitiesTask, self).__init__(source_layer_path, dest_layer_path, shape, offset, fill_missing)
   self.source_layer_path = source_layer_path
   self.dest_layer_path = dest_layer_path
   self.shape = Vec(*shape)
   self.offset = Vec(*offset)
   self.fill_missing = fill_missing
Example #15
  def execute(self):
    client = storage.Client.from_service_account_json(
      lib.credentials_path(), project=lib.GCLOUD_PROJECT_NAME
    )
    self._bucket = client.get_bucket(self.bucket_name)
    self._metadata = meta = self._download_metadata()

    self._bounds = Bbox(
      meta['physical_offset_min'], # in voxels
      meta['physical_offset_max']
    )

    shape = Vec(*meta['chunk_voxel_dimensions'])
    shape = Vec(shape.x, shape.y, shape.z, 1)

    if self.layer_type == 'image':
      dtype = meta['image_type'].lower()
      cube = self._materialize_images(shape, dtype)
    elif self.layer_type == 'segmentation':
      dtype = meta['segment_id_type'].lower()
      cube = self._materialize_segmentation(shape, dtype)
    else:
      dtype = meta['affinity_type'].lower()
      raise NotImplementedError("Don't know how to get the images for this layer.")

    self._upload_chunk(cube, dtype)
Example #16
def create_volume_from_image(image, offset, layer_path, layer_type, resolution, encoding):
  assert layer_type in ('image', 'segmentation', 'affinities')

  offset = Vec(*offset)
  volsize = Vec(*image.shape[:3])

  data_type = str(image.dtype)
  bounds = Bbox(offset, offset + volsize)

  neuroglancer_chunk_size = find_closest_divisor(image.shape[:3], closest_to=[64,64,64])

  info = CloudVolume.create_new_info(
    num_channels=1, # Increase this number when we add more tests for RGB
    layer_type=layer_type, 
    data_type=data_type, 
    encoding=encoding,
    resolution=resolution, 
    voxel_offset=bounds.minpt, 
    volume_size=bounds.size3(),
    mesh=(layer_type == 'segmentation'), 
    chunk_size=neuroglancer_chunk_size,
  )

  vol = CloudVolume(layer_path, mip=0, info=info)
  vol.commit_info()
  vol[:,:,:] = image
  return vol
Example #17
def main():
    parser = argparse.ArgumentParser(
        description='Generate field for training set module.')

    parser.add_argument('--pyramid_path', type=str)
    parser.add_argument('--checkpoint_name', type=str, default="checkpoint")
    parser.add_argument('--image_path', type=str)
    parser.add_argument('--prev_field_path', type=str)
    # parser.add_argument('--field_path', type=str)
    parser.add_argument('--dst_dir', type=str)
    parser.add_argument('--gpu', type=str, default="0")
    parser.add_argument('--stage', type=int)
    parser.add_argument('--src_mip', type=int, help='MIP of input')
    parser.add_argument('--dst_mip', type=int, help='MIP of output')
    parser.add_argument('--save_intermediary', action='store_true')
    parser.add_argument('--port', type=int, default=8888)

    parser.set_defaults(redirect_stdout=True)
    args = parser.parse_args()

    assert (os.path.exists(args.pyramid_path))
    assert (os.path.exists(args.image_path))
    assert (os.path.exists(args.prev_field_path))
    world_size = len(args.gpu.split(','))
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = str(args.port)

    img_dset = h5py.File(args.image_path, 'r')['main']
    down_res = Vec(2**args.src_mip, 2**args.src_mip, 1)
    up_res = Vec(2**args.dst_mip, 2**args.dst_mip, 1)
    for mip in [args.src_mip, args.dst_mip]:
        res = Vec(2**mip, 2**mip, 1)
        path = os.path.join(args.dst_dir, str(mip))
        info = CloudVolume.create_new_info(
            num_channels=2,
            layer_type='image',
            data_type='float32',
            encoding='raw',
            resolution=res,
            voxel_offset=[0, 0, 0],
            chunk_size=[img_dset.shape[-1], img_dset.shape[-2], 1],
            volume_size=[
                img_dset.shape[-1], img_dset.shape[-2], img_dset.shape[0]
            ])
        cv = CloudVolume(path, mip=0, info=info, cdn_cache=False)
        cv.commit_info()

    generate_shards_distributed(world_size=world_size,
                                pyramid_path=args.pyramid_path,
                                stage=args.stage,
                                checkpoint_name=args.checkpoint_name,
                                img_path=args.image_path,
                                prev_field_path=args.prev_field_path,
                                dst_dir=args.dst_dir,
                                src_mip=args.src_mip,
                                dst_mip=args.dst_mip)
Example #18
def getoffset(img, offset):
    if offset is not None:
        return Vec(*offset)
    else:
        try:
            return img.bounds.minpt
        except AttributeError:
            return Vec(0, 0, 0)
Example #19
 def __init__(self, map_path, src_path, dest_path, shape, offset):
   super(WatershedRemapTask, self).__init__(
       map_path, src_path, dest_path, shape, offset)
   self.map_path = map_path
   self.src_path = src_path
   self.dest_path = dest_path
   self.shape = Vec(*shape)
   self.offset = Vec(*offset)
Example #20
def getresolution(img, resolution):
    if resolution is not None:
        return Vec(*resolution)
    else:
        try:
            return img.resolution
        except AttributeError:
            return Vec(0, 0, 0)
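`getoffset` (Example #18) and `getresolution` share the same fallback chain: the explicit argument wins, then an attribute on the image, then a zero vector. A quick illustration with a plain numpy array, which has neither attribute:

import numpy as np

img = np.zeros((8, 8, 8), dtype=np.uint8)
getoffset(img, None)          # Vec(0, 0, 0): ndarray has no .bounds
getoffset(img, (10, 20, 30))  # Vec(10, 20, 30): explicit argument wins
getresolution(img, None)      # Vec(0, 0, 0): ndarray has no .resolution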
Example #21
 def __init__(self, src_path, dest_path, shape, offset, fill_missing, translate):
   super(self.__class__, self).__init__(src_path, dest_path, shape, offset, fill_missing, translate)
   self.src_path = src_path
   self.dest_path = dest_path
   self.shape = Vec(*shape)
   self.offset = Vec(*offset)
   self.fill_missing = fill_missing
   self.translate = Vec(*translate)
Example #22
def TransferTask(
        src_path,
        dest_path,
        mip,
        shape,
        offset,
        translate=(0, 0, 0),  # change of origin
        fill_missing=False,
        skip_first=False,
        skip_downsamples=False,
        delete_black_uploads=False,
        background_color=0,
        sparse=False,
        axis='z',
        agglomerate=False,
        timestamp=None,
        compress='gzip',
        factor=None):
    shape = Vec(*shape)
    offset = Vec(*offset)
    fill_missing = bool(fill_missing)
    translate = Vec(*translate)
    delete_black_uploads = bool(delete_black_uploads)
    sparse = bool(sparse)
    skip_first = bool(skip_first)
    skip_downsamples = bool(skip_downsamples)

    srccv = CloudVolume(src_path,
                        fill_missing=fill_missing,
                        mip=mip,
                        bounded=False)
    destcv = CloudVolume(dest_path,
                         fill_missing=fill_missing,
                         mip=mip,
                         delete_black_uploads=delete_black_uploads,
                         background_color=background_color,
                         compress=compress)

    dst_bounds = Bbox(offset, shape + offset)
    dst_bounds = Bbox.clamp(dst_bounds, destcv.bounds)
    src_bounds = dst_bounds - translate
    image = srccv.download(src_bounds,
                           agglomerate=agglomerate,
                           timestamp=timestamp)

    if skip_downsamples:
        destcv[dst_bounds] = image
    else:
        downsample_and_upload(image,
                              dst_bounds,
                              destcv,
                              shape,
                              mip=mip,
                              skip_first=skip_first,
                              sparse=sparse,
                              axis=axis,
                              factor=factor)
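TransferTask runs when invoked; a minimal call sketch with hypothetical paths, copying (and by default downsampling) one 1024x1024x64 block:

TransferTask(
    src_path='gs://bucket/image',        # hypothetical source layer
    dest_path='gs://bucket/image_copy',  # hypothetical destination layer
    mip=0,
    shape=(1024, 1024, 64),
    offset=(0, 0, 0),
)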
Example #23
def MeshSpatialIndex(
  cloudpath:str, 
  shape:Tuple[int,int,int], 
  offset:Tuple[int,int,int], 
  mip:int = 0, 
  fill_missing:bool=False, 
  compress:Optional[Union[str,bool]] = 'gzip', 
  mesh_dir:Optional[str] = None
) -> None:
  """
  The main way to add a spatial index is to use the MeshTask,
  but old datasets or broken datasets may need it to be 
  reconstituted. An alternative use is to create the spatial index
  over a different area size than the mesh task.
  """
  cv = CloudVolume(
    cloudpath, mip=mip, 
    bounded=False, fill_missing=fill_missing
  )
  cf = CloudFiles(cloudpath)

  bounds = Bbox(Vec(*offset), Vec(*shape) + Vec(*offset))
  bounds = Bbox.clamp(bounds, cv.bounds)

  data_bounds = bounds.clone()
  data_bounds.maxpt += 1 # match typical Marching Cubes overlap

  precision = cv.mesh.spatial_index.precision
  resolution = cv.resolution 

  if not mesh_dir:
    mesh_dir = cv.info["mesh"]

  # remap: old img -> img
  img, remap = cv.download(data_bounds, renumber=True)
  img = img[...,0]
  slcs = find_objects(img)
  del img
  reverse_map = { v:k for k,v in remap.items() } # img -> old img

  bboxes = {}
  for label, slc in enumerate(slcs):
    if slc is None:
      continue
    mesh_bounds = Bbox.from_slices(slc)
    mesh_bounds += Vec(*offset)
    mesh_bounds *= Vec(*resolution, dtype=np.float32)
    bboxes[str(reverse_map[label+1])] = \
      mesh_bounds.astype(resolution.dtype).to_list()

  bounds = bounds.astype(resolution.dtype) * resolution
  cf.put_json(
    f"{mesh_dir}/{bounds.to_filename(precision)}.spatial",
    bboxes,
    compress=compress,
    cache_control=False,
  )
Example #24
  def __init__(self, src_path, shape, offset, coverage_factor, mip):
    super(self.__class__, self).__init__(src_path, shape, offset, coverage_factor, mip)
    self.src_path = src_path
    self.shape = Vec(*shape)
    self.offset = Vec(*offset)
    self.coverage_factor = coverage_factor
    self.mip = int(mip)

    assert 0 < coverage_factor <= 1, "Coverage Factor must be between 0 and 1"
Example #25
 def __init__(self, shape, offset, layer_path, mip=0, simplification_factor=100, max_simplification_error=40):
   super(MeshTask, self).__init__(shape, offset, layer_path, mip, simplification_factor, max_simplification_error)
   self.shape = Vec(*shape)
   self.offset = Vec(*offset)
   self.mip = mip
   self.layer_path = layer_path
   self.lod = 0 # level of detail -- to be implemented
   self.simplification_factor = simplification_factor
   self.max_simplification_error = max_simplification_error
Example #26
 def __init__(self, layer_path, mip, shape, offset, fill_missing=False, axis='z', zero_as_background=False):
   super(DownsampleTask, self).__init__(layer_path, mip, shape, offset, fill_missing, axis)
   self.layer_path = layer_path
   self.mip = mip
   self.shape = Vec(*shape)
   self.offset = Vec(*offset)
   self.fill_missing = fill_missing
   self.axis = axis
   self.zero_as_background = zero_as_background
Example #27
def ingest(args):
    """Ingest CATMAID tiles with the same row,col index across a z range.

    Args:
        args: ArgParse object from main
    """
    mip = args.mip
    row = args.row
    col = args.col
    chunk_size = Vec(*args.chunk_size)
    chunk_size = Vec(1024, 1024, 1)  # hardcoded tile size; overrides the CLI value
    x_start = row * chunk_size[0]
    x_stop = (row + 1) * chunk_size[0]
    y_start = col * chunk_size[1]
    y_stop = (col + 1) * chunk_size[1]
    z_start = args.z_start
    z_stop = args.z_stop
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type='image',
        data_type='uint8',
        encoding='raw',
        resolution=[args.resolution[0] * 2**mip,
                    args.resolution[1] * 2**mip,
                    args.resolution[2]],
        voxel_offset=[x_start, y_start, z_start],
        chunk_size=chunk_size,
        volume_size=[chunk_size[0], chunk_size[1], z_stop - z_start],
    )
    x_range = range(x_start, x_stop)
    y_range = range(y_start, y_stop)
    z_range = range(z_start, z_stop)

    url_base = args.url_base
    ex_url = '{}/{}/{}/{}/{}.jpg'.format(url_base, mip, z_start, row, col)
    vol = CloudVolume(args.dst_path, info=info)
    vol.provenance.description = 'Cutout from CATMAID'
    vol.provenance.processing.append({
        'method': {
            'task': 'ingest',
            'src_path': url_base,
            'dst_path': args.dst_path,
            'row': row,
            'col': col,
            'z_range': [z_start, z_stop],
            'chunk_size': chunk_size.tolist(),
            'mip': args.mip,
        },
        'by': args.owner,
        'date': strftime('%Y-%m-%d %H:%M %Z'),
    })
Example #28
def coordinates2bbox(start: tuple, size: tuple = None, stop: tuple = None):
    # use bounding box of volume
    start = Vec(*start)

    if size is None:
        assert stop is not None
        size = stop - start
    else:
        size = Vec(*size)
    return Bbox.from_delta(start, size)
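Either `size` or `stop` fixes the extent; both calls below build the same box:

coordinates2bbox((0, 0, 0), size=(64, 64, 16))  # Bbox([0,0,0], [64,64,16])
coordinates2bbox((0, 0, 0), stop=(64, 64, 16))  # same box, via an endpoint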
Example #29
def create_downsample_scales(layer_path,
                             mip,
                             ds_shape,
                             axis='z',
                             preserve_chunk_size=False,
                             chunk_size=None,
                             encoding=None):
    vol = CloudVolume(layer_path, mip)
    shape = min2(vol.volume_size, ds_shape)

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)

    if chunk_size:
        underlying_shape = Vec(*chunk_size).astype(np.float32)

    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    scales = downsample_scales.compute_plane_downsampling_scales(
        size=shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    scales = scales[1:]  # omit (1,1,1)
    scales = [
        list(map(int, vol.downsample_ratio * Vec(*factor3)))
        for factor3 in scales
    ]

    if len(scales) == 0:
        print("WARNING: No scales generated.")

    for scale in scales:
        vol.add_scale(scale, encoding=encoding, chunk_size=chunk_size)

    if chunk_size is None:
        if preserve_chunk_size or len(scales) == 0:
            chunk_size = vol.scales[mip]['chunk_sizes']
        else:
            chunk_size = vol.scales[mip + 1]['chunk_sizes']
    else:
        chunk_size = [chunk_size]

    if encoding is None:
        encoding = vol.scales[mip]['encoding']

    for i in range(mip + 1, mip + len(scales) + 1):
        vol.scales[i]['chunk_sizes'] = chunk_size

    return vol.commit_info()
Example #30
def BlackoutTask(
  cloudpath, mip, shape, offset,
  value=0, non_aligned_writes=False
):
  shape = Vec(*shape)
  offset = Vec(*offset)
  vol = CloudVolume(cloudpath, mip, non_aligned_writes=non_aligned_writes)
  bounds = Bbox(offset, shape + offset)
  bounds = Bbox.clamp(bounds, vol.bounds)
  img = np.zeros(bounds.size3(), dtype=vol.dtype) + value
  vol[bounds] = img
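A one-line invocation sketch (cloudpath hypothetical), zeroing a 512-cubed region at the volume origin:

BlackoutTask('gs://bucket/image', mip=0, shape=(512, 512, 512), offset=(0, 0, 0))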