Example #1
def TransferTask(
        src_path,
        dest_path,
        mip,
        shape,
        offset,
        translate=(0, 0, 0),  # change of origin
        fill_missing=False,
        skip_first=False,
        skip_downsamples=False,
        delete_black_uploads=False,
        background_color=0,
        sparse=False,
        axis='z',
        agglomerate=False,
        timestamp=None,
        compress='gzip',
        factor=None):
    shape = Vec(*shape)
    offset = Vec(*offset)
    fill_missing = bool(fill_missing)
    translate = Vec(*translate)
    delete_black_uploads = bool(delete_black_uploads)
    sparse = bool(sparse)
    skip_first = bool(skip_first)
    skip_downsamples = bool(skip_downsamples)

    srccv = CloudVolume(src_path,
                        fill_missing=fill_missing,
                        mip=mip,
                        bounded=False)
    destcv = CloudVolume(dest_path,
                         fill_missing=fill_missing,
                         mip=mip,
                         delete_black_uploads=delete_black_uploads,
                         background_color=background_color,
                         compress=compress)

    dst_bounds = Bbox(offset, shape + offset)
    dst_bounds = Bbox.clamp(dst_bounds, destcv.bounds)
    src_bounds = dst_bounds - translate
    image = srccv.download(src_bounds,
                           agglomerate=agglomerate,
                           timestamp=timestamp)

    if skip_downsamples:
        destcv[dst_bounds] = image
    else:
        downsample_and_upload(image,
                              dst_bounds,
                              destcv,
                              shape,
                              mip=mip,
                              skip_first=skip_first,
                              sparse=sparse,
                              axis=axis,
                              factor=factor)
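
The clamp-then-translate pattern above recurs throughout these examples. Here is a minimal sketch of just the Bbox arithmetic, assuming cloudvolume is installed (the volume bounds are made up):

# Sketch of TransferTask's bounds handling; coordinates are illustrative.
from cloudvolume.lib import Bbox, Vec

shape = Vec(512, 512, 64)
offset = Vec(0, 0, 0)
translate = Vec(100, 0, 0)                          # change of origin

dst_bounds = Bbox(offset, shape + offset)           # requested write region
volume_bounds = Bbox((0, 0, 0), (256, 256, 64))     # pretend destination bounds
dst_bounds = Bbox.clamp(dst_bounds, volume_bounds)  # shrink to fit the volume
src_bounds = dst_bounds - translate                 # map back into source coords
print(src_bounds)                                   # Bbox([-100,0,0] .. [156,256,64])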
Example #2
  def execute(self):
    self._volume = CloudVolume(
      self.layer_path, self.options['mip'], bounded=False,
      parallel=self.options['parallel_download'], 
      fill_missing=self.options['fill_missing']
    )
    self._bounds = Bbox(self.offset, self.shape + self.offset)
    self._bounds = Bbox.clamp(self._bounds, self._volume.bounds)

    self.progress = bool(self.options['progress'])

    self._mesher = zmesh.Mesher(self._volume.resolution)

    # Marching cubes loves its 1vx overlaps.
    # This avoids lines appearing between
    # adjacent chunks.
    data_bounds = self._bounds.clone()
    data_bounds.minpt -= self.options['low_padding']
    data_bounds.maxpt += self.options['high_padding']

    self._mesh_dir = self.get_mesh_dir()

    if self.options['encoding'] == 'draco':
      self.draco_encoding_settings = draco_encoding_settings(
        shape=(self.shape + self.options['low_padding'] + self.options['high_padding']),
        offset=self.offset,
        resolution=self._volume.resolution,
        compression_level=self.options["draco_compression_level"],
        create_metadata=self.options['draco_create_metadata'],
        uses_new_draco_bin_size=False,
      )

    # data_bounds includes the overlap specified by low_padding/high_padding.
    # agglomerate, timestamp, and stop_layer only apply to graphene volumes;
    # they are no-ops for precomputed.
    data = self._volume.download(
      data_bounds, 
      agglomerate=self.options['agglomerate'], 
      timestamp=self.options['timestamp'], 
      stop_layer=self.options['stop_layer']
    )

    if not np.any(data):
      if self.options['spatial_index']:
        self._upload_spatial_index(self._bounds, {})
      return

    data = self._remove_dust(data, self.options['dust_threshold'])
    data = self._remap(data)

    if self.options['object_ids']:
      data = fastremap.mask_except(data, self.options['object_ids'], in_place=True)

    data, renumbermap = fastremap.renumber(data, in_place=True)
    renumbermap = { v:k for k,v in renumbermap.items() }
    self.compute_meshes(data, renumbermap)
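
The final renumber-and-invert step is easy to miss. A small sketch, assuming fastremap is installed (label values are illustrative): renumber compacts labels to 1..N and returns an {original: new} map, which the task inverts so meshes can later be keyed by their original IDs.

import numpy as np
import fastremap

labels = np.array([[1000, 1000], [2000, 0]], dtype=np.uint64)
labels, mapping = fastremap.renumber(labels, in_place=True)
renumbermap = {v: k for k, v in mapping.items()}  # new id -> original id
print(labels)        # [[1 1] [2 0]]
print(renumbermap)   # e.g. {1: 1000, 2: 2000, ...}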
Example #3
  def execute(self):
    srccv = CloudVolume(self.src_path, fill_missing=self.fill_missing, mip=self.mip)
    destcv = CloudVolume(self.dest_path, fill_missing=self.fill_missing, mip=self.mip)

    bounds = Bbox(self.offset, self.shape + self.offset)
    bounds = Bbox.clamp(bounds, srccv.bounds)
    image = srccv[bounds.to_slices()]
    bounds += self.translate
    bounds = Bbox.clamp(bounds, destcv.bounds)
    downsample_and_upload(image, bounds, destcv, self.shape, mip=self.mip)
Example #4
def test_slices_from_global_coords():
    delete_layer()
    cv, data = create_layer(size=(1024, 1024, 5, 1), offset=(7,0,0))

    bbox = Bbox( (10, 10, 1), (100, 100, 2) )

    scale = cv.info['scales'][0]
    scale = copy.deepcopy(scale)
    scale['voxel_offset'] = [ 3, 0, 0 ]
    scale['volume_size'] = [ 512, 512, 5 ]
    scale['resolution'] = [ 2, 2, 1 ]
    scale['key'] = '2_2_1'
    cv.info['scales'].append(scale)
    cv.commit_info()

    assert len(cv.available_mips) == 2

    cv.mip = 1
    slices = cv.slices_from_global_coords( Bbox( (100, 100, 1), (500, 512, 2) ) )
    result = Bbox.from_slices(slices)
    assert result == Bbox( (50, 50, 1), (250, 256, 2) )

    cv.mip = 0
    slices = cv.slices_from_global_coords( Bbox( (100, 100, 1), (500, 512, 2) ) )
    result = Bbox.from_slices(slices)
    assert result == Bbox( (100, 100, 1), (500, 512, 2) )

    slices = cv.slices_from_global_coords( np.s_[:,:,:] )
    result = Bbox.from_slices(slices)
    assert result == Bbox( (7, 0, 0), ( 1031, 1024, 5) )
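
The test leans on the to_slices/from_slices round trip. A short sketch, assuming cloudvolume's Bbox:

from cloudvolume.lib import Bbox

bbx = Bbox((7, 0, 0), (100, 100, 2))
slcs = bbx.to_slices()            # (slice(7, 100), slice(0, 100), slice(0, 2))
assert Bbox.from_slices(slcs) == bbx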
Example #5
  def execute(self):
    srcvol = CloudVolume(self.source_layer_path, mip=0, fill_missing=self.fill_missing)

    bounds = Bbox( self.offset, self.shape + self.offset )
    bounds = Bbox.clamp(bounds, srcvol.bounds)

    image = srcvol[ bounds.to_slices() ][ :, :, :, :1 ] # only use x affinity
    image = (image * 255.0).astype(np.uint8)

    destvol = CloudVolume(self.dest_layer_path, mip=0)
    downsample_and_upload(image, bounds, destvol, self.shape, mip=0, axis='z')
Example #6
  def execute(self):
    vol = CloudVolume(self.layer_path, self.mip,
                      fill_missing=self.fill_missing)
    bounds = Bbox(self.offset, self.shape + self.offset)
    bounds = Bbox.clamp(bounds, vol.bounds)
    image = vol[ bounds.to_slices() ]
    downsample_and_upload(
      image, bounds, vol,
      self.shape, self.mip, self.axis,
      skip_first=True, sparse=self.sparse
    )
Example #7
def BlackoutTask(
  cloudpath, mip, shape, offset,
  value=0, non_aligned_writes=False
):
  shape = Vec(*shape)
  offset = Vec(*offset)
  vol = CloudVolume(cloudpath, mip, non_aligned_writes=non_aligned_writes)
  bounds = Bbox(offset, shape + offset)
  bounds = Bbox.clamp(bounds, vol.bounds)
  img = np.full(bounds.size3(), value, dtype=vol.dtype)
  vol[bounds] = img
Example #8
    def execute(self):
        vol = CloudVolume(self.layer_path)

        highres_bbox = Bbox(self.offset, self.offset + self.shape)

        for mip in vol.available_mips:
            vol.mip = mip
            slices = vol.slices_from_global_coords(highres_bbox.to_slices())
            bbox = Bbox.from_slices(slices).round_to_chunk_size(
                vol.underlying, offset=vol.bounds.minpt)
            vol.delete(bbox)
Example #9
def compute_build_bounding_box(storage, prefix='build/'):
    bboxes = []
    for filename in tqdm(storage.list_files(prefix=prefix), desc='Computing Bounds'):
        bbox = Bbox.from_filename(filename) 
        bboxes.append(bbox)

    bounds = Bbox.expand(*bboxes)
    chunk_size = reduce(max2, map(lambda bbox: bbox.size3(), bboxes))

    print('bounds={} (size: {}); chunk_size={}'.format(bounds, bounds.size3(), chunk_size))
  
    return bounds, chunk_size
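
compute_build_bounding_box works because to_filename/from_filename round-trip exactly and Bbox.expand unions boxes. A sketch with illustrative values, assuming cloudvolume's Bbox:

from cloudvolume.lib import Bbox

a = Bbox((0, 0, 0), (128, 128, 32))
b = Bbox((128, 0, 0), (256, 128, 32))

name = a.to_filename()                  # '0-128_0-128_0-32'
assert Bbox.from_filename(name) == a    # parses back to the same box

union = Bbox.expand(a, b)               # smallest box covering both
assert union == Bbox((0, 0, 0), (256, 128, 32))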
Example #10
    def execute(self):
        self.cv = CloudVolume(
            self.cloudpath,
            mip=self.mip,
            bounded=False,
            fill_missing=self.options['fill_missing'],
            mesh_dir=self.options['mesh_dir'],
        )

        if not self.cv.mesh.meta.is_sharded():
            raise ValueError("The mesh sharding parameter must be defined.")

        self.bounds = Bbox(self.offset, self.shape + self.offset)
        self.bounds = Bbox.clamp(self.bounds, self.cv.bounds)

        self.progress = bool(self.options['progress'])

        self.mesher = zmesh.Mesher(self.cv.resolution)

        # Marching cubes needs 1 voxel overlap to properly
        # stitch adjacent meshes.
        # data_bounds = self.bounds.clone()
        # data_bounds.maxpt += self.overlap_vx

        self.mesh_dir = self.get_mesh_dir()
        self.draco_encoding_settings = draco_encoding_settings(
            shape=(self.shape + self.overlap_vx),
            offset=self.offset,
            resolution=self.cv.resolution,
            compression_level=1,
            create_metadata=True,
            uses_new_draco_bin_size=self.cv.meta.uses_new_draco_bin_size,
        )

        chunk_pos = self.cv.meta.point_to_chunk_position(self.bounds.center(),
                                                         mip=self.mip)

        img = mesh_graphene_remap.remap_segmentation(
            self.cv,
            chunk_pos.x,
            chunk_pos.y,
            chunk_pos.z,
            mip=self.mip,
            overlap_vx=self.overlap_vx,
            time_stamp=self.timestamp,
            progress=self.progress,
        )

        if not np.any(img):
            return

        self.upload_meshes(self.compute_meshes(img))
Example #11
def create_blackout_tasks(cloudpath: str,
                          bounds: Bbox,
                          mip: int = 0,
                          shape: ShapeType = (2048, 2048, 64),
                          value: int = 0,
                          non_aligned_writes: bool = False):

    vol = CloudVolume(cloudpath, mip=mip)

    shape = Vec(*shape)
    bounds = Bbox.create(bounds)
    bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)

    if not non_aligned_writes:
        bounds = bounds.expand_to_chunk_size(vol.chunk_size, vol.voxel_offset)

    bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

    class BlackoutTaskIterator(FinelyDividedTaskIterator):
        def task(self, shape, offset):
            bounded_shape = min2(shape, vol.bounds.maxpt - offset)
            return partial(
                igneous.tasks.BlackoutTask,
                cloudpath=cloudpath,
                mip=mip,
                shape=shape.clone(),
                offset=offset.clone(),
                value=value,
                non_aligned_writes=non_aligned_writes,
            )

        def on_finish(self):
            vol.provenance.processing.append({
                'method': {
                    'task': 'BlackoutTask',
                    'cloudpath': cloudpath,
                    'mip': mip,
                    'non_aligned_writes': non_aligned_writes,
                    'value': value,
                    'shape': shape.tolist(),
                    'bounds': [
                        bounds.minpt.tolist(),
                        bounds.maxpt.tolist(),
                    ],
                },
                'by':
                operator_contact(),
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })

    return BlackoutTaskIterator(bounds, shape)
Example #12
def child_upload_process(
    meta,
    cache,
    img_shape,
    offset,
    mip,
    compress,
    cdn_cache,
    progress,
    location,
    location_bbox,
    location_order,
    delete_black_uploads,
    background_color,
    green,
    chunk_ranges,
    compress_level=None,
):
    global fs_lock
    reset_connection_pools()

    shared_shape = img_shape
    if location_bbox:
        shared_shape = list(location_bbox.size3()) + [meta.num_channels]

    array_like, renderbuffer = shm.ndarray(shape=shared_shape,
                                           dtype=meta.dtype,
                                           location=location,
                                           order=location_order,
                                           lock=fs_lock,
                                           readonly=True)

    if location_bbox:
        cutout_bbox = Bbox(offset, offset + img_shape[:3])
        delta_box = cutout_bbox.clone() - location_bbox.minpt
        renderbuffer = renderbuffer[delta_box.to_slices()]

    threaded_upload_chunks(
        meta,
        cache,
        renderbuffer,
        mip,
        chunk_ranges,
        compress=compress,
        cdn_cache=cdn_cache,
        progress=progress,
        delete_black_uploads=delete_black_uploads,
        background_color=background_color,
        green=green,
        compress_level=compress_level,
    )
    array_like.close()
Example #13
def upload_build_chunks(storage, volume, offset=(0, 0, 0), build_chunk_size=(1024, 1024, 128)):
  offset = Vec(*offset)
  shape = Vec(*volume.shape[:3])
  build_chunk_size = Vec(*build_chunk_size)

  for spt in xyzrange( (0,0,0), shape, build_chunk_size):
    ept = min2(spt + build_chunk_size, shape)
    bbox = Bbox(spt, ept)
    chunk = volume[ bbox.to_slices() ]
    bbox += offset
    filename = 'build/{}'.format(bbox.to_filename())
    storage.put_file(filename, chunks.encode_npz(chunk))
  storage.wait()
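
The tiling loop is the core of this helper: xyzrange walks the grid and min2 clips the final partial chunk. A runnable sketch, assuming cloudvolume's helpers (the sizes are made up):

from cloudvolume.lib import Bbox, Vec, min2, xyzrange

shape = Vec(100, 100, 10)                # whole volume extent
step = Vec(64, 64, 8)                    # build chunk size

for spt in xyzrange((0, 0, 0), shape, step):
    ept = min2(spt + step, shape)        # clip at the volume edge
    print(Bbox(spt, ept).to_filename())  # e.g. '64-100_64-100_8-10'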
Example #14
  def select_bounding_boxes(self, dataset_bounds):
    # Sample 2048x2048x1 patches until coverage factor is
    # satisfied. Ensure the patches are non-overlapping and
    # random.
    sample_shape = Bbox( (0,0,0), (2048, 2048, 1) )
    area = self.shape.rectVolume()

    total_patches = int(math.ceil(area / sample_shape.volume()))
    N = int(math.ceil(float(total_patches) * self.coverage_factor))

    # Simplification: We are making patch selection against a discrete
    # grid instead of a continuous space. This removes the influence of
    # overlap in a less complex fashion.
    patch_indices = set()
    while len(patch_indices) < N:
      ith_patch = random.randint(0, (total_patches - 1))
      patch_indices.add(ith_patch)

    gridx = int(math.ceil(self.shape.x / sample_shape.size3().x))

    bboxes = []
    for i in patch_indices:
      patch_start = Vec( i % gridx, i // gridx, 0 )
      patch_start *= sample_shape.size3()
      patch_start += self.offset
      bbox = Bbox( patch_start, patch_start + sample_shape.size3() )
      bbox = Bbox.clamp(bbox, dataset_bounds)
      bboxes.append(bbox)
    return bboxes
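
The flat patch index i is decoded with i % gridx and i // gridx. A sketch of that arithmetic, assuming cloudvolume's Bbox and Vec (gridx and the indices are illustrative):

from cloudvolume.lib import Bbox, Vec

sample_size = Vec(2048, 2048, 1)
gridx = 4                               # e.g. an 8192-voxel-wide dataset

for i in (0, 3, 5):
    start = Vec(i % gridx, i // gridx, 0) * sample_size
    print(i, Bbox(start, start + sample_size))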
Example #15
def create_blackout_tasks(cloudpath,
                          bounds,
                          mip=0,
                          shape=(2048, 2048, 64),
                          value=0,
                          non_aligned_writes=False):

    vol = CloudVolume(cloudpath, mip=mip)

    shape = Vec(*shape)
    bounds = Bbox.create(bounds)
    bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
    bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

    class BlackoutTaskIterator():
        def __len__(self):
            return num_tasks(bounds, shape)

        def __iter__(self):
            for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
                bounded_shape = min2(shape, vol.bounds.maxpt - startpt)
                yield igneous.tasks.BlackoutTask(
                    cloudpath=cloudpath,
                    mip=mip,
                    shape=shape.clone(),
                    offset=startpt.clone(),
                    value=value,
                    non_aligned_writes=non_aligned_writes,
                )

            vol.provenance.processing.append({
                'method': {
                    'task': 'BlackoutTask',
                    'cloudpath': cloudpath,
                    'mip': mip,
                    'non_aligned_writes': non_aligned_writes,
                    'value': value,
                    'shape': shape.tolist(),
                    'bounds': [
                        bounds.minpt.tolist(),
                        bounds.maxpt.tolist(),
                    ],
                },
                'by':
                OPERATOR_CONTACT,
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
            vol.commit_provenance()

    return BlackoutTaskIterator()
Example #16
def create_touch_tasks(self,
                       cloudpath,
                       mip=0,
                       shape=(2048, 2048, 64),
                       bounds=None):

    vol = CloudVolume(cloudpath, mip=mip)

    shape = Vec(*shape)

    if bounds is None:
        bounds = vol.bounds.clone()

    bounds = Bbox.create(bounds)
    bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
    bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

    class TouchTaskIterator():
        def __len__(self):
            return num_tasks(bounds, shape)

        def __iter__(self):
            for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
                bounded_shape = min2(shape, vol.bounds.maxpt - startpt)
                yield igneous.tasks.TouchTask(
                    cloudpath=cloudpath,
                    shape=bounded_shape.clone(),
                    offset=startpt.clone(),
                    mip=mip,
                )

            vol.provenance.processing.append({
                'method': {
                    'task': 'TouchTask',
                    'mip': mip,
                    'shape': shape.tolist(),
                    'bounds': [
                        bounds.minpt.tolist(),
                        bounds.maxpt.tolist(),
                    ],
                },
                'by':
                OPERATOR_CONTACT,
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
            vol.commit_provenance()

    return TouchTaskIterator()
Example #17
def get_bounds(vol, bounds, shape, mip, chunk_size=None):
    if bounds is None:
        bounds = vol.bounds.clone()
    else:
        bounds = Bbox.create(bounds)
        bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
        if chunk_size is not None:
            bounds = bounds.expand_to_chunk_size(chunk_size,
                                                 vol.mip_voxel_offset(mip))
        bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

    print("Volume Bounds: ", vol.mip_bounds(mip))
    print("Selected ROI:  ", bounds)

    return bounds
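
The chunk-alignment step is the subtle part of get_bounds. A sketch, assuming cloudvolume's Bbox: expand_to_chunk_size grows the box outward to chunk boundaries measured from the given voxel offset.

from cloudvolume.lib import Bbox, Vec

roi = Bbox((10, 10, 3), (100, 90, 10))
aligned = roi.expand_to_chunk_size(Vec(64, 64, 8), offset=(0, 0, 0))
assert aligned == Bbox((0, 0, 0), (128, 128, 16))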
Example #18
def test_compute_build_bounding_box():
    delete_layer()
    storage = create_storage()
    random_data = np.random.randint(255, size=(256, 256, 128), dtype=np.uint8)

    # Easy, power of two, 0 offsets
    task_creation.upload_build_chunks(storage,
                                      random_data,
                                      offset=(0, 0, 0),
                                      build_chunk_size=(128, 128, 32))
    bounds, chunk_size = task_creation.compute_build_bounding_box(storage)

    assert np.all(chunk_size == (128, 128, 32))
    assert bounds == Bbox((0, 0, 0), (256, 256, 128))

    # Prime offsets
    delete_layer()
    task_creation.upload_build_chunks(storage,
                                      random_data,
                                      offset=(3, 23, 5),
                                      build_chunk_size=(128, 128, 32))
    bounds, chunk_size = task_creation.compute_build_bounding_box(storage)

    assert np.all(chunk_size == (128, 128, 32))
    assert bounds == Bbox((3, 23, 5), (256 + 3, 256 + 23, 128 + 5))

    # Non-power of two edges, 0 offsets
    random_data2 = random_data[:100, :100, :106]
    delete_layer()
    task_creation.upload_build_chunks(storage,
                                      random_data2,
                                      offset=(0, 0, 0),
                                      build_chunk_size=(32, 32, 32))
    bounds, chunk_size = task_creation.compute_build_bounding_box(storage)

    assert np.all(chunk_size == (32, 32, 32))
    assert bounds == Bbox((0, 0, 0), (100, 100, 106))

    # Non-power of two edges, offsets
    delete_layer()
    task_creation.upload_build_chunks(storage,
                                      random_data2,
                                      offset=(3, 23, 5),
                                      build_chunk_size=(64, 64, 32))
    bounds, chunk_size = task_creation.compute_build_bounding_box(storage)

    assert np.all(chunk_size == (64, 64, 32))
    assert bounds == Bbox((3, 23, 5), (100 + 3, 100 + 23, 106 + 5))
Example #19
  def fetch_z_levels(self):
    bounds = Bbox(self.offset, self.shape[:3] + self.offset)

    levelfilenames = [
      'levels/{}/{}'.format(self.mip, z)
      for z in range(bounds.minpt.z, bounds.maxpt.z)
    ]
    
    with Storage(self.levels_path) as stor:
      levels = stor.get_files(levelfilenames)

    errors = [
      level['filename']
      for level in levels if level['content'] is None
    ]

    if errors:
      raise Exception(
        ", ".join(errors) +
        " were not defined. Did you run a LuminanceLevelsTask for these slices?"
      )

    levels = [(
      int(os.path.basename(item['filename'])),
      json.loads(item['content'].decode('utf-8'))
    ) for item in levels ]

    levels.sort(key=lambda x: x[0])
    levels = [x[1] for x in levels]
    return [ np.array(x['levels'], dtype=np.uint64) for x in levels ]
Example #20
    def _create_thumbnail(self, chunk):
        logging.info('creating thumbnail...')

        thumbnail_layer_path = os.path.join(self.volume_path, 'thumbnail')
        thumbnail_volume = CloudVolume(thumbnail_layer_path,
                                       compress='gzip',
                                       fill_missing=True,
                                       bounded=False,
                                       autocrop=True,
                                       mip=self.mip,
                                       cache=False,
                                       green_threads=True,
                                       progress=False)

        # only use the last channel; it is the Z affinity
        # if this is an affinity map
        image = chunk[-1, :, :, :]
        if np.issubdtype(image.dtype, np.floating):
            image = (image * 255).astype(np.uint8)

        #self.thumbnail_operator(image)
        # transpose to xyzc
        image = np.transpose(image)
        image_bbox = Bbox.from_slices(chunk.slices[::-1][:3])

        downsample_and_upload(image,
                              image_bbox,
                              thumbnail_volume,
                              Vec(*(image.shape)),
                              mip=self.mip,
                              max_mip=6,
                              axis='z',
                              skip_first=True,
                              only_last_mip=True)
Example #21
def agglomerate_group(seg_map, merge_output, gid=None, relabel=True):
    if not seg_map:
        return {}
    G = nx.Graph()
    G.add_nodes_from(seg_map.keys())

    # find neighbors
    for k1, k2 in itertools.product(seg_map.keys(), seg_map.keys()):
        bb1, p1 = seg_map[k1]['bbox'], seg_map[k1]['output']
        bb2, p2 = seg_map[k2]['bbox'], seg_map[k2]['output']
        int_bb = Bbox.intersection(bb1, bb2)
        if not int_bb.empty() and k1 != k2:
            G.add_edge(k1, k2)

    # agglomerate each pair and rewrite cloud volume
    for k1, k2 in G.edges():
        p1 = seg_map[k1]['output']
        p2 = seg_map[k2]['output']
        if relabel:
            remap_label = agglomerate(p1,
                                      p2,
                                      contiguous=False,
                                      inplace=True,
                                      no_zero=True)
    return merge(seg_map, merge_output, gid)
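
The neighbor test relies on Bbox.intersection yielding an empty box for disjoint inputs. A sketch, assuming cloudvolume's Bbox:

from cloudvolume.lib import Bbox

a = Bbox((0, 0, 0), (10, 10, 10))
b = Bbox((5, 5, 5), (15, 15, 15))
c = Bbox((20, 20, 20), (30, 30, 30))

assert not Bbox.intersection(a, b).empty()  # overlapping boxes
assert Bbox.intersection(a, c).empty()      # disjoint boxes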
Example #22
        def __iter__(self):
            for x, y, z in xyzrange(grid_size):
                output_bounds = Bbox.from_slices(
                    tuple(
                        slice(s + g * b, s + g * b + b)
                        for (s, g, b) in zip(output_block_start, (
                            z, y, x), output_block_size)))
                yield MaskAffinitymapTask(
                    aff_input_layer_path=aff_input_layer_path,
                    aff_output_layer_path=aff_output_layer_path,
                    aff_mip=aff_mip,
                    mask_layer_path=mask_layer_path,
                    mask_mip=mask_mip,
                    output_bounds=output_bounds,
                )

            vol = CloudVolume(aff_output_layer_path, mip=aff_mip)
            vol.provenance.processing.append({
                'method': {
                    'task': 'MaskAffinitymapTask',
                    'aff_input_layer_path': aff_input_layer_path,
                    'aff_output_layer_path': aff_output_layer_path,
                    'aff_mip': aff_mip,
                    'mask_layer_path': mask_layer_path,
                    'mask_mip': mask_mip,
                    'output_block_start': output_block_start,
                    'output_block_size': output_block_size,
                    'grid_size': grid_size,
                },
                'by':
                OPERATOR_CONTACT,
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
            vol.commit_provenance()
Example #23
    def execute(self):
        vol = CloudVolume(self.cloudpath,
                          mip=self.mip,
                          info=self.info,
                          cdn_cache=False)
        bbox = Bbox.clamp(self.bounds, vol.bounds)

        path = skeldir(self.cloudpath)
        path = os.path.join(self.cloudpath, path)

        all_labels = vol[bbox.to_slices()]
        all_labels = all_labels[:, :, :, 0]

        skeletons = kimimaro.skeletonize(all_labels,
                                         self.teasar_params,
                                         object_ids=self.object_ids,
                                         anisotropy=vol.resolution,
                                         dust_threshold=1000,
                                         cc_safety_factor=0.25,
                                         progress=False,
                                         fix_branching=self.fix_branching)

        for segid, skel in skeletons.items():
            skel.vertices[:, 0] += bbox.minpt.x * vol.resolution.x
            skel.vertices[:, 1] += bbox.minpt.y * vol.resolution.y
            skel.vertices[:, 2] += bbox.minpt.z * vol.resolution.z

        self.upload(vol, path, bbox, skeletons.values())
Example #24
    def from_array(cls, arr: np.ndarray):
        bboxes = []
        for idx in range(arr.shape[0]):
            bbox = Bbox.from_vec(arr[idx, :])
            bboxes.append(bbox)

        return cls(bboxes)
Example #25
    def _create_mesh(self, obj_id, left_bound_offset):
        mesh = self._mesher.get_mesh(
            obj_id,
            simplification_factor=self.options['simplification_factor'],
            max_simplification_error=self.options['max_simplification_error'],
            voxel_centered=True,
        )

        self._mesher.erase(obj_id)

        resolution = self._volume.resolution
        offset = (self._bounds.minpt - self.options['low_padding']).astype(
            np.float32)
        mesh.vertices[:] += (offset - left_bound_offset) * resolution

        mesh_bounds = Bbox(np.amin(mesh.vertices, axis=0),
                           np.amax(mesh.vertices, axis=0))

        if self.options['encoding'] == 'draco':
            mesh_binary = DracoPy.encode(mesh.vertices, mesh.faces,
                                         **self.draco_encoding_settings)
        elif self.options['encoding'] == 'precomputed':
            mesh_binary = mesh.to_precomputed()

        return mesh_binary, mesh_bounds
Example #26
    def crop(self, bbox):
        """
    Crop away all vertices and edges that lie outside of the given bbox.
    The edge counts as inside.

    Returns: new PrecomputedSkeleton
    """
        skeleton = self.clone()
        bbox = Bbox.create(bbox)

        if skeleton.empty():
            return skeleton

        nodes_valid_mask = np.array(
            [bbox.contains(vtx) for vtx in skeleton.vertices], dtype=bool)
        nodes_valid_idx = np.where(nodes_valid_mask)[0]

        # Set invalid vertices to be duplicates
        # so they'll be removed during consolidation
        if nodes_valid_idx.shape[0] == 0:
            return PrecomputedSkeleton()

        first_node = nodes_valid_idx[0]
        skeleton.vertices[~nodes_valid_mask] = skeleton.vertices[first_node]

        edges_valid_mask = np.isin(skeleton.edges, nodes_valid_idx)
        edges_valid_idx = edges_valid_mask[:, 0] * edges_valid_mask[:, 1]
        skeleton.edges = skeleton.edges[edges_valid_idx, :]
        return skeleton.consolidate()
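
crop() hinges on Bbox.contains treating the boundary as inside, per the docstring. A sketch, assuming cloudvolume's Bbox:

from cloudvolume.lib import Bbox

bbx = Bbox((0, 0, 0), (10, 10, 10))
assert bbx.contains((10, 10, 10))     # boundary point counts as inside
assert not bbx.contains((11, 0, 0))   # outside on x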
Example #27
  def execute(self):
    vol = CloudVolume(
      self.cloudpath, mip=self.mip, 
      info=self.info, cdn_cache=False,
      parallel=self.parallel
    )
    bbox = Bbox.clamp(self.bounds, vol.bounds)

    path = skeldir(self.cloudpath)
    path = os.path.join(self.cloudpath, path)

    all_labels = vol[ bbox.to_slices() ]
    all_labels = all_labels[:,:,:,0]

    if self.mask_ids:
      all_labels = fastremap.mask(all_labels, self.mask_ids)

    skeletons = kimimaro.skeletonize(
      all_labels, self.teasar_params, 
      object_ids=self.object_ids, anisotropy=vol.resolution,
      dust_threshold=self.dust_threshold, cc_safety_factor=0.25,
      progress=self.progress, 
      fix_branching=self.fix_branching,
      fix_borders=self.fix_borders,
      parallel=self.parallel,
    )

    for segid, skel in skeletons.items():
      skel.vertices[:] += bbox.minpt * vol.resolution
      
    self.upload(vol, path, bbox, skeletons.values())
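
The vertex shift converts task-local coordinates into global physical space. A numpy-only sketch of that step (all values are made up):

import numpy as np

vertices = np.zeros((4, 3), dtype=np.float32)   # toy skeleton vertices
minpt = np.array([512, 512, 64])                # task bbox.minpt, in voxels
resolution = np.array([4, 4, 40])               # nm per voxel
vertices[:] += minpt * resolution               # now in global nm coordinates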
Example #28
    def __call__(self, chunk):
        assert 3 == chunk.ndim
        voxel_offset = chunk.voxel_offset

        num_mips = self.stop_mip - self.chunk_mip
        # tinybrain uses F order and requires a 4D array!
        chunk2 = np.transpose(chunk)
        # chunk2 = np.reshape(chunk2, (*chunk2.shape, 1))
        chunk2 = np.expand_dims(chunk2, 3)

        if np.issubdtype(chunk.dtype, np.floating) or chunk.dtype == np.uint8:
            pyramid = tinybrain.downsample_with_averaging(
                chunk2, factor=self.factor[::-1], num_mips=num_mips)
        else:
            pyramid = tinybrain.downsample_segmentation(
                chunk2, factor=self.factor[::-1], num_mips=num_mips)

        for mip in range(self.start_mip, self.stop_mip):
            # the first chunk in pyramid is already downsampled!
            downsampled_chunk = pyramid[mip - self.chunk_mip - 1]
            # compute new offset, only downsample the y,x dimensions
            offset = np.divide(
                voxel_offset,
                np.asarray([
                    self.factor[0]**(mip - self.chunk_mip),
                    self.factor[1]**(mip - self.chunk_mip),
                    self.factor[2]**(mip - self.chunk_mip)
                ]))
            bbox = Bbox.from_delta(offset, downsampled_chunk.shape[0:3][::-1])
            # upload downsampled chunk, note that we should use F order in the indexing
            self.vols[mip][bbox.to_slices()[::-1]] = downsampled_chunk
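
Each mip level divides the voxel offset axis by axis by factor ** (mip - chunk_mip), so a z factor of 1 leaves z untouched. A numpy-only sketch with illustrative values:

import numpy as np

voxel_offset = np.array([1024, 1024, 128])
factor = np.array([2, 2, 1], dtype=float)
for delta in (1, 2, 3):                  # mip - chunk_mip
    print(delta, voxel_offset / factor ** delta)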
Example #29
def test_bbox_hashing():
    bbx = Bbox.from_list([1, 2, 3, 4, 5, 6])
    d = {}
    d[bbx] = 1

    assert len(d) == 1
    for k, v in d.items():
        assert v == 1

    bbx = Bbox((1., 1.3, 2.), (3., 4., 4.))
    d = {}
    d[bbx] = 1

    assert len(d) == 1
    for k, v in d.items():
        assert v == 1
Example #30
def to_volumecutout(img,
                    image_type,
                    resolution=None,
                    offset=None,
                    hostname='localhost'):
    from cloudvolume.volumecutout import VolumeCutout
    if isinstance(img, VolumeCutout):
        try:
            img.dataset_name  # check if it's an intact VolumeCutout
            return img
        except AttributeError:
            pass

    resolution = getresolution(img, resolution)
    offset = getoffset(img, offset)

    return VolumeCutout(
        buf=img,
        path=ExtractedPath('mem', hostname, '/', '', '', '', ''),
        cloudpath='IN MEMORY',
        resolution=resolution,
        mip=-1,
        layer_type=image_type,
        bounds=Bbox(offset, offset + Vec(*(img.shape[:3]))),
        handle=None,
    )