Example #1
    def pull_voxel(self,
                   seg_id: int,
                   v_id: int,
                   radius: int = 1) -> Tuple[np.ndarray, Bbox, np.ndarray]:
        """Pull a subvolume around a specified skeleton vertex with of shape [2r+1, 2r+1, 2r+1], in voxels.

        Arguments:
            seg_id: ID of the segment to use, depends on data in s3.
            v_id: ID of the vertex to use, depends on the segment.
            radius: Radius of pulled volume around central voxel, in voxels.
                Optional, default is 1 (3x3 volume is pulled, centered at the vertex).

        Returns:
            img: A (2*radius+1) x (2*radius+1) x (2*radius+1) volume.
            bounds: Bounding box object which contains the bounds of the volume.
            vox_in_img: List of coordinates which locate the initial point in the volume.
        """
        check_type(radius, (int, np.integer))
        if radius < 0:
            raise ValueError(f"{radius} should be nonnegative.")

        voxel = self._get_voxel(seg_id,
                                v_id)  # does type checking for seg_id and v_id
        bounds = Bbox(voxel, voxel)
        seed = bounds.to_list()
        shape = [radius] * 3
        bounds = Bbox(np.subtract(seed[:3], shape),
                      np.add(np.add(seed[3:], shape), 1))
        img = self.pull_bounds_img(bounds)
        # img = self.cv.download(bounds, mip=self.mip)
        vox_in_img = voxel - np.array(bounds.to_list()[:3])
        return np.squeeze(np.array(img)), bounds, vox_in_img
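
A minimal sketch of the radius arithmetic above, assuming Bbox comes from cloudvolume.lib: a radius of 1 around a voxel yields a 3x3x3 box.

import numpy as np
from cloudvolume.lib import Bbox  # assumed import path

voxel = np.array([10, 20, 30])
radius = 1
seed = Bbox(voxel, voxel).to_list()       # [10, 20, 30, 10, 20, 30]
shape = [radius] * 3
bounds = Bbox(np.subtract(seed[:3], shape),
              np.add(np.add(seed[3:], shape), 1))
assert list(bounds.size3()) == [3, 3, 3]  # 2*radius + 1 per axis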
Example #2
  def select_bounding_boxes(self, dataset_bounds):
    # Sample 2048x2048x1 patches until the coverage factor is
    # satisfied. Ensure the patches are non-overlapping and
    # random.
    sample_shape = Bbox( (0,0,0), (2048, 2048, 1) )
    area = self.shape.rectVolume()

    total_patches = int(math.ceil(area / sample_shape.volume()))
    N = int(math.ceil(float(total_patches) * self.coverage_factor))

    # Simplification: We are making patch selection against a discrete
    # grid instead of a continuous space. This removes the influence of
    # overlap in a less complex fashion.
    patch_indices = set()
    while len(patch_indices) < N:
      ith_patch = random.randint(0, (total_patches - 1))
      patch_indices.add(ith_patch)

    gridx = int(math.ceil(self.shape.x / sample_shape.size3().x))

    bboxes = []
    for i in patch_indices:
      patch_start = Vec( i % gridx, i // gridx, 0 )
      patch_start *= sample_shape.size3()
      patch_start += self.offset
      bbox = Bbox( patch_start, patch_start + sample_shape.size3() )
      bbox = Bbox.clamp(bbox, dataset_bounds)
      bboxes.append(bbox)
    return bboxes
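
A small sketch of the clamping step (illustrative bounds): a patch that spills past the dataset edge is trimmed by Bbox.clamp.

from cloudvolume.lib import Bbox  # assumed import path

dataset_bounds = Bbox((0, 0, 0), (3000, 3000, 1))
patch = Bbox((2048, 0, 0), (4096, 2048, 1))  # spills past x = 3000
clamped = Bbox.clamp(patch, dataset_bounds)
assert clamped == Bbox((2048, 0, 0), (3000, 2048, 1))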
Example #3
def test_slices_to_global_coords():
    delete_layer()
    cv, _ = create_layer(size=(1024, 1024, 5, 1), offset=(7, 0, 0))

    scale = cv.info['scales'][0]
    scale = copy.deepcopy(scale)
    scale['voxel_offset'] = [3, 0, 0]
    scale['volume_size'] = [512, 512, 5]
    scale['resolution'] = [2, 2, 1]
    scale['key'] = '2_2_1'
    cv.info['scales'].append(scale)
    cv.commit_info()

    assert len(cv.available_mips) == 2

    cv.mip = 1
    slices = cv.slices_to_global_coords(Bbox((100, 100, 1), (500, 512, 2)))

    result = Bbox.from_slices(slices)
    assert result == Bbox((200, 200, 1), (1000, 1024, 2))

    cv.mip = 0
    slices = cv.slices_to_global_coords(Bbox((100, 100, 1), (500, 512, 2)))
    result = Bbox.from_slices(slices)
    assert result == Bbox((100, 100, 1), (500, 512, 2))
Example #4
def test_bbox_division():
    box = Bbox((0, 2, 4), (4, 8, 16))
    assert (box // 2) == Bbox((0, 1, 2), (2, 4, 8))

    box = Bbox((0, 3, 4), (4, 8, 16))

    assert (box / 2) == Bbox((0, 1.5, 2), (2, 4, 8))
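
For comparison, a sketch of floor division on the same box: each coordinate floors independently, so the fractional 1.5 above becomes 1.

from cloudvolume.lib import Bbox  # assumed import path

box = Bbox((0, 3, 4), (4, 8, 16))
assert (box // 2) == Bbox((0, 1, 2), (2, 4, 8))  # 3 // 2 == 1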
Example #5
 def __init__(self,
              cloudpath,
              shape,
              offset,
              mip,
              teasar_params,
              will_postprocess,
              info=None,
              object_ids=None,
              mask_ids=None,
              fix_branching=True,
              fix_borders=True,
              fix_avocados=False,
              dust_threshold=1000,
              progress=False,
              parallel=1,
              fill_missing=False,
              sharded=False,
              spatial_index=True,
              spatial_grid_shape=None,
              synapses=None):
     super(SkeletonTask,
           self).__init__(cloudpath, shape, offset, mip, teasar_params,
                          will_postprocess, info, object_ids, mask_ids,
                          fix_branching, fix_borders, fix_avocados,
                          dust_threshold, progress, parallel, fill_missing,
                          bool(sharded), bool(spatial_index),
                          spatial_grid_shape, synapses)
     self.bounds = Bbox(offset, Vec(*shape) + Vec(*offset))
     self.index_bounds = Bbox(offset,
                              Vec(*spatial_grid_shape) + Vec(*offset))
Example #6
def test_bbox_division():
    box = Bbox((0, 2, 4), (4, 8, 16))
    assert (box // 2) == Bbox((0, 1, 2), (2, 4, 8))

    box = Bbox((0, 3, 4), (4, 8, 16), dtype=np.float32)
    print((box / 2.))
    print(Bbox((0., 1.5, 2.), (2., 4., 8.)))
    assert (box / 2.) == Bbox((0., 1.5, 2.), (2., 4., 8.))
Example #7
def test_bbox_intersection():
    bbx1 = Bbox((0, 0, 0), (10, 10, 10))
    bbx2 = Bbox((5, 5, 5), (15, 15, 15))

    assert Bbox.intersection(bbx1, bbx2) == Bbox((5, 5, 5), (10, 10, 10))
    assert Bbox.intersection(bbx2, bbx1) == Bbox((5, 5, 5), (10, 10, 10))
    bbx2.minpt = Vec(11, 11, 11)
    assert Bbox.intersection(bbx1, bbx2) == Bbox((0, 0, 0), (0, 0, 0))
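
As the last assertion shows, disjoint boxes intersect to a zero-volume Bbox, which gives a quick overlap test (sketch):

from cloudvolume.lib import Bbox  # assumed import path

a = Bbox((0, 0, 0), (10, 10, 10))
b = Bbox((11, 11, 11), (15, 15, 15))
assert Bbox.intersection(a, b).volume() == 0  # no overlap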
Example #8
def test_bbox_to_mip():
  info = {
    'data_type': 'uint8',
    'mesh': '',
    'num_channels': 1,
    'scales': [
      { 
        'chunk_sizes': [[64, 64, 1]],
        'encoding': 'raw',
        'key': '4_4_40',
        'resolution': [4, 4, 40],
        'size': [1024, 1024, 32],
        'voxel_offset': [35, 0, 1],
      },
      {
        'chunk_sizes': [[64, 64, 1]],
        'encoding': 'raw',
        'key': '8_8_40',
        'resolution': [8, 8, 40],
        'size': [512, 512, 32],
        'voxel_offset': [17, 0, 1],
      },
      {
        'chunk_sizes': [[64, 64, 1]],
        'encoding': 'raw',
        'key': '16_16_40',
        'resolution': [16, 16, 40],
        'size': [256, 256, 32],
        'voxel_offset': [8, 0, 1],
      },
      {
        'chunk_sizes': [[64, 64, 1]],
        'encoding': 'raw',
        'key': '32_32_40',
        'resolution': [32, 32, 40],
        'size': [128, 128, 32],
        'voxel_offset': [4, 0, 1],
      },
    ],
    'type': 'image'
  }
  
  cv = CloudVolume('file:///tmp/removeme/bbox_to_mip', info=info)

  bbox = Bbox( (35,0,1), (1024, 1024, 32))
  res = cv.bbox_to_mip(bbox, 0, 3)
  assert res.minpt.x == 4
  assert res.minpt.y == 0
  assert res.minpt.z == 1

  bbox = Bbox( (4, 0, 1), (128, 128, 32) )
  res = cv.bbox_to_mip(bbox, 3, 0)
  assert res.minpt.x == 32
  assert res.minpt.y == 0
  assert res.minpt.z == 1  

  res = cv.bbox_to_mip(bbox, 0, 0)
  assert res == bbox
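
The scaling behind those x assertions, in isolation: mip 0 to mip 3 divides by the resolution ratio (32/4 = 8) and floors; going back up multiplies.

factor = 32 // 4           # resolution ratio, mip 3 vs. mip 0
assert 35 // factor == 4   # mip 0 -> mip 3, floored
assert 4 * factor == 32    # mip 3 -> mip 0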
Example #9
def find_sj(seg_vc_df, seg_chunk, vc_labels, mask_chunk, sj_chunk, offset, 
            sj_thresh=30, pad=(3, 3, 2), border_thickness=(5, 5, 2), min_sj_size=25, max_neighbor_count=3):
  '''Find sj partner for each vc'''
  line_annos = []
  offset = np.array(offset)
  pad = np.array(pad) # pad bbox around vc object
  valid_pair_entries = []
  seg_ids_with_vc = np.array((seg_vc_df['seg_id']))
  
  sj_chunk[np.isin(mask_chunk, [1, 2])] = 0
  sj_chunk = ndimage.gaussian_filter(sj_chunk, sigma=(1, 1, 0))
  sj_seeds, _ = ndimage.label(sj_chunk > sj_thresh * 2)
  sj_labels = watershed(-sj_chunk, markers=sj_seeds, mask=sj_chunk > sj_thresh, 
    connectivity=np.ones((3,3,3)))

  input_bb = Bbox((0, 0, 0), seg_chunk.shape)
  
  for ind, row in tqdm(seg_vc_df.iterrows(), total=len(seg_vc_df), disable=True):
    pos = (row.vc_pos - offset).astype(np.int32)

    vc_bbox = Bbox(row.vc_min_pos - offset - pad, row.vc_max_pos - offset + pad)
    inter_bb = Bbox.intersection(input_bb, vc_bbox)
    if np.prod(inter_bb.size3()) == 0:
      continue
    local_slc = inter_bb.to_slices()
    local_offset = offset + inter_bb.minpt
      
    local_vc_labels = vc_labels[local_slc]
    local_vc_mask = local_vc_labels == row.vc_id
    local_sj_chunk = sj_chunk[local_slc]
    local_sj_labels = sj_labels[local_slc]
    
    sj_entries = get_neighbors(local_vc_mask, local_sj_labels, 
      border_thickness, min_sj_size, max_neighbor_count)
    if not len(sj_entries):
      continue
    for s in sj_entries:
      mean_sj_value = np.mean(local_sj_chunk[local_sj_labels == s['id']])
      norm_size = s['size'] * min(mean_sj_value / 128.0, 1.0)
      if norm_size < min_sj_size:
        continue
      valid_pair_entries.append({
        'pre_seg_id': row.seg_id,
        'vc_id': row.vc_id,
        'vc_pos': row.vc_pos,
        'vc_size': row.vc_size,
        'sj_id': s['id'],
        'sj_pos': s['pos'] + local_offset,
        'sj_size': s['size'],
        'sj_norm_size': norm_size,
        'sj_value': mean_sj_value
      })
  synapse_df = pd.DataFrame(valid_pair_entries)
  if not len(synapse_df):
    return None, sj_labels
  synapse_df = keep_max_generic(synapse_df, 'sj_id', 'sj_size')
  return synapse_df, sj_labels
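
The intersect-then-slice crop pattern from the loop above, reduced to a runnable sketch (array shape is illustrative):

import numpy as np
from cloudvolume.lib import Bbox  # assumed import path

chunk = np.zeros((100, 100, 50), dtype=np.uint8)
input_bb = Bbox((0, 0, 0), chunk.shape)
vc_bbox = Bbox((90, 90, 45), (120, 120, 60))  # extends past the chunk
inter_bb = Bbox.intersection(input_bb, vc_bbox)
local = chunk[inter_bb.to_slices()]
assert local.shape == (10, 10, 5)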
Example #10
def test_non_aligned_write():
    delete_layer()
    offset = Vec(5, 7, 13)
    cv, _ = create_layer(size=(1024, 1024, 5, 1), offset=offset)

    cv[:] = np.zeros(shape=cv.shape, dtype=cv.dtype)

    # Write inside a single chunk

    onepx = Bbox((10, 200, 15), (11, 201, 16))
    try:
        cv[onepx.to_slices()] = np.ones(shape=onepx.size3(), dtype=cv.dtype)
        assert False
    except AlignmentError:
        pass

    cv.non_aligned_writes = True
    cv[onepx.to_slices()] = np.ones(shape=onepx.size3(), dtype=cv.dtype)
    answer = np.zeros(shape=cv.shape, dtype=cv.dtype)
    answer[5, 193, 2] = 1
    assert np.all(cv[:] == answer)

    # Write across multiple chunks
    cv[:] = np.zeros(shape=cv.shape, dtype=cv.dtype)
    cv.non_aligned_writes = True
    middle = Bbox((512 - 10, 512 - 11, 0), (512 + 10, 512 + 11, 5)) + offset
    cv[middle.to_slices()] = np.ones(shape=middle.size3(), dtype=cv.dtype)
    answer = np.zeros(shape=cv.shape, dtype=cv.dtype)
    answer[502:522, 501:523, :] = 1
    assert np.all(cv[:] == answer)

    cv.non_aligned_writes = False
    try:
        cv[middle.to_slices()] = np.ones(shape=middle.size3(), dtype=cv.dtype)
        assert False
    except AlignmentError:
        pass

    # Big inner shell
    delete_layer()
    cv, _ = create_layer(size=(1024, 1024, 5, 1), offset=offset)
    cv[:] = np.zeros(shape=cv.shape, dtype=cv.dtype)
    middle = Bbox((512 - 150, 512 - 150, 0),
                  (512 + 150, 512 + 150, 5)) + offset

    try:
        cv[middle.to_slices()] = np.ones(shape=middle.size3(), dtype=cv.dtype)
        assert False
    except AlignmentError:
        pass

    cv.non_aligned_writes = True
    cv[middle.to_slices()] = np.ones(shape=middle.size3(), dtype=cv.dtype)
    answer = np.zeros(shape=cv.shape, dtype=cv.dtype)
    answer[362:662, 362:662, :] = 1
    assert np.all(cv[:] == answer)
Example #11
def test_bbox_to_filename():
    bbx = Bbox([0, 2, 4], [1, 3, 5])

    assert bbx.to_filename() == "0-1_2-3_4-5"
    assert bbx.to_filename(None) == "0-1_2-3_4-5"
    assert bbx.to_filename(0) == "0-1_2-3_4-5"
    assert bbx.to_filename(1) == "0.0-1.0_2.0-3.0_4.0-5.0"

    bbx = Bbox([1.1, 3.2, 5.49], [2.000003, 4.0000000000000005, 6.12372412421])

    assert bbx.to_filename(3) == "1.100-2.000_3.200-4.000_5.490-6.124"
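
If your cloudvolume version also provides Bbox.from_filename (assumed here, not shown in the test), the filename encoding round-trips:

from cloudvolume.lib import Bbox  # assumed import path

bbx = Bbox([0, 2, 4], [1, 3, 5])
# from_filename is assumed to be the inverse of to_filename
assert Bbox.from_filename(bbx.to_filename()) == bbx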
Example #12
def test_bbox_slicing():
    bbx_rect = Bbox.from_slices(np.s_[1:10, 1:10, 1:10])
    bbx_plane = Bbox.from_slices(np.s_[1:10, 10:1, 1:10])

    assert bbx_rect == Bbox((1, 1, 1), (10, 10, 10))
    assert bbx_plane == Bbox((1, 10, 1), (10, 10, 10))

    try:
        bbx_plane = Bbox.from_slices(np.s_[1:10, 10:1:-1, 1:10])
        assert False
    except ValueError:
        pass

    bbx_plane = Bbox.from_slices(np.s_[1:10, 10:1:1, 1:10])
    assert bbx_plane == Bbox((1, 10, 1), (10, 10, 10))
Example #13
def test_compute_build_bounding_box():
    delete_layer()
    storage = create_storage()
    random_data = np.random.randint(255, size=(256, 256, 128), dtype=np.uint8)

    # Easy, power of two, 0 offsets
    task_creation.upload_build_chunks(storage,
                                      random_data,
                                      offset=(0, 0, 0),
                                      build_chunk_size=(128, 128, 32))
    bounds, chunk_size = task_creation.compute_build_bounding_box(storage)

    assert np.all(chunk_size == (128, 128, 32))
    assert bounds == Bbox((0, 0, 0), (256, 256, 128))

    # Prime offsets
    delete_layer()
    task_creation.upload_build_chunks(storage,
                                      random_data,
                                      offset=(3, 23, 5),
                                      build_chunk_size=(128, 128, 32))
    bounds, chunk_size = task_creation.compute_build_bounding_box(storage)

    assert np.all(chunk_size == (128, 128, 32))
    assert bounds == Bbox((3, 23, 5), (256 + 3, 256 + 23, 128 + 5))

    # Non-power of two edges, 0 offsets
    random_data2 = random_data[:100, :100, :106]
    delete_layer()
    task_creation.upload_build_chunks(storage,
                                      random_data2,
                                      offset=(0, 0, 0),
                                      build_chunk_size=(32, 32, 32))
    bounds, chunk_size = task_creation.compute_build_bounding_box(storage)

    assert np.all(chunk_size == (32, 32, 32))
    assert bounds == Bbox((0, 0, 0), (100, 100, 106))

    # Non-power of two edges, offsets
    delete_layer()
    task_creation.upload_build_chunks(storage,
                                      random_data2,
                                      offset=(3, 23, 5),
                                      build_chunk_size=(64, 64, 32))
    bounds, chunk_size = task_creation.compute_build_bounding_box(storage)

    assert np.all(chunk_size == (64, 64, 32))
    assert bounds == Bbox((3, 23, 5), (100 + 3, 100 + 23, 106 + 5))
Example #14
def create_volume_from_image(image, offset, layer_path, layer_type, resolution, encoding):
  assert layer_type in ('image', 'segmentation', 'affinities')

  offset = Vec(*offset)
  volsize = Vec(*image.shape[:3])

  data_type = str(image.dtype)
  bounds = Bbox(offset, offset + volsize)

  neuroglancer_chunk_size = find_closest_divisor(image.shape[:3], closest_to=[64,64,64])

  info = CloudVolume.create_new_info(
    num_channels=1, # Increase this number when we add more tests for RGB
    layer_type=layer_type, 
    data_type=data_type, 
    encoding=encoding,
    resolution=resolution, 
    voxel_offset=bounds.minpt, 
    volume_size=bounds.size3(),
    mesh=(layer_type == 'segmentation'), 
    chunk_size=neuroglancer_chunk_size,
  )

  vol = CloudVolume(layer_path, mip=0, info=info)
  vol.commit_info()
  vol[:,:,:] = image
  return vol
Example #15
def ImageShardTransferTask(
  src_path: str,
  dst_path: str,
  shape: ShapeType,
  offset: ShapeType,
  mip: int = 0,
  fill_missing: bool = False,
  translate: ShapeType = (0, 0, 0),
  agglomerate: bool = False,
  timestamp: Optional[int] = None,
):
  """
  Generates a sharded image volume from
  a preexisting CloudVolume readable data 
  source. Downsamples are not generated.

  The sharded specification can be read here:
  Shard Container: 
  https://github.com/google/neuroglancer/blob/056a3548abffc3c76c93c7a906f1603ce02b5fa3/src/neuroglancer/datasource/precomputed/sharded.md
  Sharded Images:    
  https://github.com/google/neuroglancer/blob/056a3548abffc3c76c93c7a906f1603ce02b5fa3/src/neuroglancer/datasource/precomputed/volume.md#unsharded-chunk-storage
  """
  shape = Vec(*shape)
  offset = Vec(*offset)
  mip = int(mip)
  fill_missing = bool(fill_missing)
  translate = Vec(*translate)

  src_vol = CloudVolume(
    src_path, fill_missing=fill_missing, 
    mip=mip, bounded=False
  )
  dst_vol = CloudVolume(
    dst_path,
    fill_missing=fill_missing,
    mip=mip,
    compress=None
  )

  dst_bbox = Bbox(offset, offset + shape)
  dst_bbox = Bbox.clamp(dst_bbox, dst_vol.meta.bounds(mip))
  dst_bbox = dst_bbox.expand_to_chunk_size(
    dst_vol.meta.chunk_size(mip), 
    offset=dst_vol.meta.voxel_offset(mip)
  )
  src_bbox = dst_bbox - translate

  img = src_vol.download(
    src_bbox, agglomerate=agglomerate, timestamp=timestamp
  )
  (filename, shard) = dst_vol.image.make_shard(
    img, dst_bbox, mip, progress=False
  )
  del img

  basepath = dst_vol.meta.join(
    dst_vol.cloudpath, dst_vol.meta.key(mip)
  )

  CloudFiles(basepath).put(filename, shard)
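
The alignment step above in isolation: expand_to_chunk_size grows a bbox outward to the chunk grid (illustrative values):

from cloudvolume.lib import Bbox, Vec  # assumed import path

bbx = Bbox((10, 10, 1), (100, 100, 5))
aligned = bbx.expand_to_chunk_size(Vec(64, 64, 8), offset=Vec(0, 0, 0))
assert aligned == Bbox((0, 0, 0), (128, 128, 8))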
Example #16
    def _create_mesh(self, obj_id, left_bound_offset):
        mesh = self._mesher.get_mesh(
            obj_id,
            simplification_factor=self.options['simplification_factor'],
            max_simplification_error=self.options['max_simplification_error'],
            voxel_centered=True,
        )

        self._mesher.erase(obj_id)

        resolution = self._volume.resolution
        offset = (self._bounds.minpt - self.options['low_padding']).astype(
            np.float32)
        mesh.vertices[:] += (offset - left_bound_offset) * resolution

        mesh_bounds = Bbox(np.amin(mesh.vertices, axis=0),
                           np.amax(mesh.vertices, axis=0))

        if self.options['encoding'] == 'draco':
            mesh_binary = DracoPy.encode(mesh.vertices, mesh.faces,
                                         **self.draco_encoding_settings)
        elif self.options['encoding'] == 'precomputed':
            mesh_binary = mesh.to_precomputed()

        return mesh_binary, mesh_bounds
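
Computing mesh bounds from vertex extents, as above, reduces to this sketch (vertices are illustrative):

import numpy as np
from cloudvolume.lib import Bbox  # assumed import path

vertices = np.array([[0., 1., 2.], [4., 5., 6.], [2., 0., 9.]])
mesh_bounds = Bbox(np.amin(vertices, axis=0), np.amax(vertices, axis=0))
assert mesh_bounds == Bbox((0., 0., 2.), (4., 5., 9.))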
Example #17
        def synapses_for_bbox(self, shape, offset):
            """
      Returns { seigd: [ ((x,y,z), swc_label), ... ] 
      where x,y,z are in voxel coordinates with the
      origin set to the bottom left corner of this cutout.
      """
            bbox = Bbox(offset, shape + offset) * vol.resolution
            center = bbox.center()
            diagonal = Vec(*((bbox.maxpt - center)))
            pts = [
                centroids[i, :]
                for i in kdtree.query_ball_point(center, diagonal.length())
            ]
            pts = [
                tuple(Vec(*pt, dtype=int)) for pt in pts if bbox.contains(pt)
            ]

            synapses = defaultdict(list)
            for pt in pts:
                for label, swc_label in labelsmap[pt]:
                    voxel_pt = Vec(*pt,
                                   dtype=np.float32) / vol.resolution - offset
                    synapses[label].append(
                        (tuple(voxel_pt.astype(int)), swc_label))
            return synapses
Example #18
    def execute(self):
        srccv = CloudVolume(self.src_path)
        destcv = CloudVolume(self.dest_path)

        bounds = Bbox( self.offset, self.shape + self.offset )
        bounds = Bbox.clamp(bounds, srccv.bounds)

        remap = self._get_map()
        watershed_data = srccv[ bounds.to_slices() ]

        # Here's how the remapping works. Numpy has a special
        # indexing that can be used to perform the remap.
        # The remap array is a key:value mapping where the
        # array index is the key and the value is the contents.
        # The watershed_data array contains only data values that
        # are within the length of the remap array.
        #
        # e.g.
        #
        # remap = np.array([1,2,3]) # i.e. 0=>1, 1=>2, 2=>3
        # vals = np.array([0,1,1,1,2,0,2,1,2])
        #
        # remap[vals] # array([1, 2, 2, 2, 3, 1, 3, 2, 3])

        image = remap[watershed_data]
        downsample_and_upload(image, bounds, destcv, self.shape)
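
The remap trick from the comment, runnable in plain numpy:

import numpy as np

remap = np.array([1, 2, 3])  # 0=>1, 1=>2, 2=>3
vals = np.array([0, 1, 1, 1, 2, 0, 2, 1, 2])
assert np.array_equal(remap[vals], [1, 2, 2, 2, 3, 1, 3, 2, 3])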
Example #19
  def fetch_z_levels(self):
    bounds = Bbox(self.offset, self.shape[:3] + self.offset)

    levelfilenames = [
      'levels/{}/{}'.format(self.mip, z) \
      for z in range(bounds.minpt.z, bounds.maxpt.z)
    ]
    
    with Storage(self.levels_path) as stor:
      levels = stor.get_files(levelfilenames)

    errors = [ 
      level['filename'] \
      for level in levels if level['content'] is None
    ]

    if len(errors):
      raise Exception(", ".join(
          errors) + " were not defined. Did you run a LuminanceLevelsTask for these slices?")

    levels = [(
      int(os.path.basename(item['filename'])),
      json.loads(item['content'].decode('utf-8'))
    ) for item in levels ]

    levels.sort(key=lambda x: x[0])
    levels = [x[1] for x in levels]
    return [ np.array(x['levels'], dtype=np.uint64) for x in levels ]
Example #20
  def execute(self):
    srccv = CloudVolume(self.src_path, fill_missing=self.fill_missing, mip=self.mip)
    destcv = CloudVolume(self.dest_path, fill_missing=self.fill_missing, mip=self.mip)

    bounds = Bbox( self.offset, self.shape[:3] + self.offset )
    bounds = Bbox.clamp(bounds, srccv.bounds)
    image = srccv[ bounds.to_slices() ].astype(np.float32)

    zlevels = self.fetch_z_levels()

    nbits = np.dtype(srccv.dtype).itemsize * 8
    maxval = float(2 ** nbits - 1)

    for z in range(bounds.minpt.z, bounds.maxpt.z):
      imagez = z - bounds.minpt.z
      zlevel = zlevels[ imagez ]
      (lower, upper) = self.find_section_clamping_values(zlevel, self.clip_fraction, 1 - self.clip_fraction)
      if lower == upper:
        continue
      img = image[:,:,imagez]
      img = (img - float(lower)) * (maxval /  (float(upper) - float(lower)))
      image[:,:,imagez] = img

    image = np.round(image)
    image = np.clip(image, 0.0, maxval).astype(destcv.dtype)

    bounds += self.translate
    downsample_and_upload(image, bounds, destcv, self.shape)
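
The per-slice stretch in isolation: values at lower map to 0 and values at upper map to maxval (a numeric sketch):

import numpy as np

img = np.array([10.0, 100.0, 200.0])
lower, upper, maxval = 10.0, 200.0, 255.0
out = (img - lower) * (maxval / (upper - lower))
assert np.isclose(out[0], 0.0) and np.isclose(out[-1], 255.0)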
Example #21
  def execute(self):
    client = storage.Client.from_service_account_json(
      lib.credentials_path(), project=lib.GCLOUD_PROJECT_NAME
    )
    self._bucket = client.get_bucket(self.bucket_name)
    self._metadata = meta = self._download_metadata()

    self._bounds = Bbox(
      meta['physical_offset_min'], # in voxels
      meta['physical_offset_max']
    )

    shape = Vec(*meta['chunk_voxel_dimensions'])
    shape = Vec(shape.x, shape.y, shape.z, 1)

    if self.layer_type == 'image':
      dtype = meta['image_type'].lower()
      cube = self._materialize_images(shape, dtype)
    elif self.layer_type == 'segmentation':
      dtype = meta['segment_id_type'].lower()
      cube = self._materialize_segmentation(shape, dtype)
    else:
      dtype = meta['affinity_type'].lower()
      raise NotImplementedError("Don't know how to get the images for this layer.")

    self._upload_chunk(cube, dtype)
Example #22
  def execute(self):
    self._mesher = Mesher()

    self._volume = CloudVolume(self.layer_path, self.mip, bounded=False)
    self._bounds = Bbox( self.offset, self.shape + self.offset )
    self._bounds = Bbox.clamp(self._bounds, self._volume.bounds)

    # Marching cubes loves its 1vx overlaps.
    # This avoids lines appearing between
    # adjacent chunks.
    data_bounds = self._bounds.clone()
    data_bounds.minpt -= 1
    data_bounds.maxpt += 1

    self._mesh_dir = None
    if 'meshing' in self._volume.info:
      self._mesh_dir = self._volume.info['meshing']
    elif 'mesh' in self._volume.info:
      self._mesh_dir = self._volume.info['mesh']

    if not self._mesh_dir:
      raise ValueError("The mesh destination is not present in the info file.")

    self._data = self._volume[data_bounds.to_slices()] # chunk_position includes a 1 pixel overlap
    self._compute_meshes()
Example #23
def get_bboxes(union_bbox,
               chunk_size,
               overlap=(0, 0, 0),
               back_shift_small=False,
               backend='cloudvolume'):
    '''Use ffn subbox calculator to generate sequential overlapping bboxes'''
    if isinstance(union_bbox, Bbox):
        ffn_style_bbox = bounding_box.BoundingBox(np.array(union_bbox.minpt),
                                                  np.array(union_bbox.size3()))
    else:
        ffn_style_bbox = union_bbox

    calc = bounding_box.OrderlyOverlappingCalculator(
        outer_box=ffn_style_bbox,
        sub_box_size=chunk_size,
        overlap=overlap,
        include_small_sub_boxes=True,
        back_shift_small_sub_boxes=back_shift_small)
    bbs = list(calc.generate_sub_boxes())
    if backend == 'ffn':
        pass
    elif backend == 'cloudvolume':
        bbs = [Bbox(a=bb.start, b=bb.start + bb.size) for bb in bbs]
    else:
        raise ValueError('Use either ffn or cloudvolume')
    return bbs
Example #24
  def execute(self):
    self._volume = CloudVolume(
        self.layer_path, self.options['mip'], bounded=False,
        parallel=self.options['parallel_download'])
    self._bounds = Bbox(self.offset, self.shape + self.offset)
    self._bounds = Bbox.clamp(self._bounds, self._volume.bounds)

    self._mesher = Mesher(self._volume.resolution)

    # Marching cubes loves its 1vx overlaps.
    # This avoids lines appearing between
    # adjacent chunks.
    data_bounds = self._bounds.clone()
    data_bounds.minpt -= self.options['low_padding']
    data_bounds.maxpt += self.options['high_padding']

    self._mesh_dir = None
    if self.options['mesh_dir'] is not None:
      self._mesh_dir = self.options['mesh_dir']
    elif 'mesh' in self._volume.info:
      self._mesh_dir = self._volume.info['mesh']

    if not self._mesh_dir:
      raise ValueError("The mesh destination is not present in the info file.")

    # chunk_position includes the overlap specified by low_padding/high_padding
    self._data = self._volume[data_bounds.to_slices()]
    self._remap()
    self._compute_meshes()
Example #25
def test_bbox_volume():
    bbx = Bbox((0, 0, 0), (2000, 2000, 2000))
    # the key point: 8,000,000,000 exceeds the int32 maximum
    assert bbx.volume() == 8000000000

    bbx = bbx.astype(np.float32)
    assert bbx.volume() == 8000000000
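
Why the test matters numerically: 8,000,000,000 exceeds the int32 maximum, so volume() must accumulate in a wider type. A quick check:

import numpy as np

volume = int(np.prod(np.array([2000, 2000, 2000], dtype=np.int64)))
assert volume == 8_000_000_000
assert volume > np.iinfo(np.int32).max  # 2_147_483_647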
Example #26
 def __init__(self, image_layer_path, convnet_path, mask_layer_path, output_layer_path,
         output_offset, output_shape, patch_size, patch_overlap,
         cropping_margin_size, output_key='output', num_output_channels=3, 
              image_mip=1, output_mip=1, mask_mip=3):
     
     super().__init__(image_layer_path, convnet_path, mask_layer_path, output_layer_path,
             output_offset, output_shape, patch_size, patch_overlap, 
             cropping_margin_size, output_key, num_output_channels, 
             image_mip, output_mip, mask_mip)
     
     output_shape = Vec(*output_shape)
     output_offset = Vec(*output_offset)
     self.image_layer_path = image_layer_path
     self.convnet_path = convnet_path
     self.mask_layer_path = mask_layer_path 
     self.output_layer_path = output_layer_path
     self.output_bounds = Bbox(output_offset, output_shape + output_offset)
     self.patch_size = patch_size
     self.patch_overlap = patch_overlap
     self.cropping_margin_size = cropping_margin_size
     self.output_key = output_key
     self.num_output_channels = num_output_channels
     self.image_mip = image_mip
     self.output_mip = output_mip
     self.mask_mip = mask_mip 
Example #27
def generate_chunks(meta, img, offset, mip):
    shape = Vec(*img.shape)[:3]
    offset = Vec(*offset)[:3]

    bounds = Bbox(offset, shape + offset)

    alignment_check = bounds.round_to_chunk_size(meta.chunk_size(mip),
                                                 meta.voxel_offset(mip))

    if not np.all(alignment_check.minpt == bounds.minpt):
        raise AlignmentError("""
      Only chunk aligned writes are supported by this function. 

      Got:             {}
      Volume Offset:   {} 
      Nearest Aligned: {}
    """.format(bounds, meta.voxel_offset(mip), alignment_check))

    bounds = Bbox.clamp(bounds, meta.bounds(mip))

    img_offset = bounds.minpt - offset
    img_end = Vec.clamp(bounds.size3() + img_offset, Vec(0, 0, 0), shape)

    for startpt in xyzrange(img_offset, img_end, meta.chunk_size(mip)):
        startpt = startpt.clone()
        endpt = min2(startpt + meta.chunk_size(mip), shape)
        spt = (startpt + bounds.minpt).astype(int)
        ept = (endpt + bounds.minpt).astype(int)
        yield (startpt, endpt, spt, ept)
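
A sketch of the chunk walk itself, assuming xyzrange comes from cloudvolume.lib: a 4x4x1 region in 2x2x1 chunks yields four chunk starts.

from cloudvolume.lib import Vec, xyzrange  # assumed import path

starts = sorted(
    tuple(int(c) for c in pt)
    for pt in xyzrange(Vec(0, 0, 0), Vec(4, 4, 1), Vec(2, 2, 1))
)
assert starts == [(0, 0, 0), (0, 2, 0), (2, 0, 0), (2, 2, 0)]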
Example #28
def to_volumecutout(img,
                    image_type,
                    resolution=None,
                    offset=None,
                    hostname='localhost'):
    from cloudvolume.volumecutout import VolumeCutout
    if type(img) == VolumeCutout:
        try:
            img.dataset_name  # check if it's an intact VolumeCutout
            return img
        except AttributeError:
            pass

    resolution = getresolution(img, resolution)
    offset = getoffset(img, offset)

    return VolumeCutout(
        buf=img,
        path=ExtractedPath('mem', hostname, '/', '', '', '', ''),
        cloudpath='IN MEMORY',
        resolution=resolution,
        mip=-1,
        layer_type=image_type,
        bounds=Bbox(offset, offset + Vec(*(img.shape[:3]))),
        handle=None,
    )
Example #29
 def execute(self):
   vol = CloudVolume(self.layer_path, self.mip, fill_missing=self.fill_missing)
   bounds = Bbox( self.offset, self.shape + self.offset )
   bounds = Bbox.clamp(bounds, vol.bounds)
   image = vol[ bounds.to_slices() ]
   downsample_and_upload(image, bounds, vol, self.shape, self.mip, self.axis,
                         skip_first=True, zero_as_background=self.zero_as_background)
Example #30
def view(img,
         segmentation=False,
         resolution=None,
         offset=None,
         hostname="localhost",
         port=DEFAULT_PORT):
    from cloudvolume.volumecutout import VolumeCutout

    img = to3d(img)
    resolution = getresolution(img, resolution)
    offset = getoffset(img, offset)

    # Makes sense for viewing not segmentation
    # which requires uints currently. (Jan. 2019)
    if np.dtype(img.dtype).itemsize == 8 and not np.issubdtype(
            img.dtype, np.float64):
        print(
            yellow("""
Converting {} to float64 for display. 
Javascript does not support native 64-bit integer arrays.
      """.format(img.dtype)))
        img = img.astype(np.float64)

    cutout = VolumeCutout(
        buf=img,
        path=ExtractedPath('mem', hostname, '/', '', '', '', ''),
        cloudpath='IN MEMORY',
        resolution=resolution,
        mip=-1,
        layer_type=('segmentation' if segmentation else 'image'),
        bounds=Bbox(offset, offset + Vec(*(img.shape[:3]))),
        handle=None,
    )
    return run([cutout], hostname=hostname, port=port)