def execute(self):
  srccv = CloudVolume(self.src_path, fill_missing=self.fill_missing, mip=self.mip)
  destcv = CloudVolume(self.dest_path, fill_missing=self.fill_missing, mip=self.mip)

  bounds = Bbox(self.offset, self.shape + self.offset)
  bounds = Bbox.clamp(bounds, srccv.bounds)

  image = srccv[bounds.to_slices()]
  bounds += self.translate
  bounds = Bbox.clamp(bounds, destcv.bounds)
  downsample_and_upload(image, bounds, destcv, self.shape, mip=self.mip)

def get_bounds(vol, bounds, mip, bounds_mip=0, chunk_size=None):
  """Return bounds of vol at mip, or snap bounds at bounds_mip to chunk_size.

  Args:
    vol (CloudVolume)
    bounds (Bbox-like object)
    mip (int): mip level of returned bounds
    bounds_mip (int): mip level of input bounds
    chunk_size (Vec-like object): if bounds are set, can set chunk_size for snapping

  Returns:
    Bbox for bounds
  """
  if bounds is None:
    bounds = vol.meta.bounds(mip)
  else:
    bounds = Bbox.create(bounds)
    bounds = vol.bbox_to_mip(bounds, mip=bounds_mip, to_mip=mip)
    if chunk_size is not None:
      bounds = bounds.expand_to_chunk_size(chunk_size, vol.meta.voxel_offset(mip))
    bounds = Bbox.clamp(bounds, vol.meta.bounds(mip))

  print("Volume Bounds: ", vol.meta.bounds(mip))
  print("Selected ROI: ", bounds)

  return bounds

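# Hedged usage sketch for get_bounds: snap a mip-0 ROI onto the mip-2
# chunk grid before dividing it into tasks. The path is hypothetical;
# CloudVolume and Bbox are the same cloudvolume objects used above.
vol = CloudVolume("gs://bucket/layer", mip=2)
roi = get_bounds(
  vol, Bbox((0, 0, 0), (1024, 1024, 64)), mip=2,
  bounds_mip=0, chunk_size=vol.meta.chunk_size(2),
)
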
def execute(self):
  vol = CloudVolume(
    self.cloudpath, mip=self.mip,
    info=self.info, cdn_cache=False,
    parallel=self.parallel,
  )
  bbox = Bbox.clamp(self.bounds, vol.bounds)

  path = skeldir(self.cloudpath)
  path = os.path.join(self.cloudpath, path)

  all_labels = vol[bbox.to_slices()]
  all_labels = all_labels[:, :, :, 0]

  if self.mask_ids:
    all_labels = fastremap.mask(all_labels, self.mask_ids)

  skeletons = kimimaro.skeletonize(
    all_labels, self.teasar_params,
    object_ids=self.object_ids,
    anisotropy=vol.resolution,
    dust_threshold=self.dust_threshold,
    cc_safety_factor=0.25,
    progress=self.progress,
    fix_branching=self.fix_branching,
    fix_borders=self.fix_borders,
    parallel=self.parallel,
  )

  # Shift vertices from chunk-local voxel coordinates
  # into global physical coordinates.
  for segid, skel in six.iteritems(skeletons):
    skel.vertices[:] += bbox.minpt * vol.resolution

  self.upload(vol, path, bbox, skeletons.values())

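# Hedged standalone sketch of the kimimaro call above, outside the task
# machinery, on a toy label volume. The teasar_params values are
# illustrative, not tuned; kimimaro fills in any omitted parameters.
import numpy as np
import kimimaro

labels = np.zeros((128, 128, 32), dtype=np.uint32)
labels[32:96, 60:68, :] = 1  # a single bar-shaped "neuron"

skels = kimimaro.skeletonize(
  labels,
  teasar_params={'scale': 4, 'const': 500},  # physical units
  anisotropy=(16, 16, 40),  # nm per voxel
  dust_threshold=100,  # skip tiny connected components
  progress=False,
)
# skels maps segment id -> skeleton in physical coordinates
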
def ImageShardTransferTask(
  src_path: str,
  dst_path: str,
  shape: ShapeType,
  offset: ShapeType,
  mip: int = 0,
  fill_missing: bool = False,
  translate: ShapeType = (0, 0, 0),
  agglomerate: bool = False,
  timestamp: Optional[int] = None,
):
  """
  Generates a sharded image volume from a preexisting
  CloudVolume readable data source. Downsamples are not
  generated.

  The sharded specification can be read here:

  Shard Container:
  https://github.com/google/neuroglancer/blob/056a3548abffc3c76c93c7a906f1603ce02b5fa3/src/neuroglancer/datasource/precomputed/sharded.md

  Sharded Images:
  https://github.com/google/neuroglancer/blob/056a3548abffc3c76c93c7a906f1603ce02b5fa3/src/neuroglancer/datasource/precomputed/volume.md#unsharded-chunk-storage
  """
  shape = Vec(*shape)
  offset = Vec(*offset)
  mip = int(mip)
  fill_missing = bool(fill_missing)
  translate = Vec(*translate)

  src_vol = CloudVolume(
    src_path, fill_missing=fill_missing,
    mip=mip, bounded=False
  )
  dst_vol = CloudVolume(
    dst_path, fill_missing=fill_missing,
    mip=mip, compress=None
  )

  dst_bbox = Bbox(offset, offset + shape)
  dst_bbox = Bbox.clamp(dst_bbox, dst_vol.meta.bounds(mip))
  dst_bbox = dst_bbox.expand_to_chunk_size(
    dst_vol.meta.chunk_size(mip),
    offset=dst_vol.meta.voxel_offset(mip)
  )
  src_bbox = dst_bbox - translate

  img = src_vol.download(
    src_bbox, agglomerate=agglomerate, timestamp=timestamp
  )
  (filename, shard) = dst_vol.image.make_shard(
    img, dst_bbox, mip, progress=False
  )
  del img

  basepath = dst_vol.meta.join(
    dst_vol.cloudpath, dst_vol.meta.key(mip)
  )
  CloudFiles(basepath).put(filename, shard)

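# Hedged usage sketch: the destination layer's info is assumed to already
# declare a sharded encoding (otherwise make_shard has nothing to target);
# both paths are hypothetical.
ImageShardTransferTask(
  "gs://bucket/src-layer",
  "gs://bucket/dst-layer",
  shape=(2048, 2048, 64),
  offset=(0, 0, 0),
  mip=0,
)
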
def execute(self):
  vol = CloudVolume(self.layer_path, self.mip, fill_missing=self.fill_missing)
  bounds = Bbox(self.offset, self.shape + self.offset)
  bounds = Bbox.clamp(bounds, vol.bounds)
  image = vol[bounds.to_slices()]
  downsample_and_upload(
    image, bounds, vol, self.shape,
    self.mip, self.axis,
    skip_first=True,
    zero_as_background=self.zero_as_background,
  )

def execute(self):
  srccv = CloudVolume(self.src_path, fill_missing=self.fill_missing, mip=self.mip)
  destcv = CloudVolume(self.dest_path, fill_missing=self.fill_missing, mip=self.mip)

  bounds = Bbox(self.offset, self.shape[:3] + self.offset)
  bounds = Bbox.clamp(bounds, srccv.bounds)
  image = srccv[bounds.to_slices()].astype(np.float32)

  zlevels = self.fetch_z_levels()

  nbits = np.dtype(srccv.dtype).itemsize * 8
  maxval = float(2 ** nbits - 1)

  for z in range(bounds.minpt.z, bounds.maxpt.z):
    imagez = z - bounds.minpt.z
    zlevel = zlevels[imagez]
    (lower, upper) = self.find_section_clamping_values(
      zlevel, self.clip_fraction, 1 - self.clip_fraction
    )
    if lower == upper:
      continue
    # Stretch [lower, upper] linearly onto the full dtype range [0, maxval].
    img = image[:, :, imagez]
    img = (img - float(lower)) * (maxval / (float(upper) - float(lower)))
    image[:, :, imagez] = img

  image = np.round(image)
  image = np.clip(image, 0.0, maxval).astype(destcv.dtype)

  bounds += self.translate
  downsample_and_upload(image, bounds, destcv, self.shape)

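# Worked sketch of the per-section contrast stretch above, in isolation.
# The clamp values here are hypothetical; the real ones come from the
# accumulated luminance histogram via find_section_clamping_values.
import numpy as np

section = np.array([[10, 50], [120, 240]], dtype=np.float32)
lower, upper, maxval = 20.0, 200.0, 255.0

stretched = (section - lower) * (maxval / (upper - lower))
stretched = np.clip(np.round(stretched), 0.0, maxval).astype(np.uint8)
# values <= lower map to 0, values >= upper saturate at 255
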
def execute(self):
  self._mesher = Mesher()
  self._volume = CloudVolume(self.layer_path, self.mip, bounded=False)
  self._bounds = Bbox(self.offset, self.shape + self.offset)
  self._bounds = Bbox.clamp(self._bounds, self._volume.bounds)

  # Marching cubes loves its 1vx overlaps.
  # This avoids lines appearing between
  # adjacent chunks.
  data_bounds = self._bounds.clone()
  data_bounds.minpt -= 1
  data_bounds.maxpt += 1

  self._mesh_dir = None
  if 'meshing' in self._volume.info:
    self._mesh_dir = self._volume.info['meshing']
  elif 'mesh' in self._volume.info:
    self._mesh_dir = self._volume.info['mesh']

  if not self._mesh_dir:
    raise ValueError("The mesh destination is not present in the info file.")

  # chunk_position includes a 1 pixel overlap
  self._data = self._volume[data_bounds.to_slices()]
  self._compute_meshes()

def select_bounding_boxes(self, dataset_bounds):
  # Sample 2048x2048x1 patches until the coverage factor is
  # satisfied. Ensure the patches are non-overlapping and
  # random.
  sample_shape = Bbox((0, 0, 0), (2048, 2048, 1))
  area = self.shape.rectVolume()

  total_patches = int(math.ceil(area / sample_shape.volume()))
  N = int(math.ceil(float(total_patches) * self.coverage_factor))

  # Simplification: We are making patch selection against a discrete
  # grid instead of a continuous space. This removes the influence of
  # overlap in a less complex fashion.
  patch_indices = set()
  while len(patch_indices) < N:
    ith_patch = random.randint(0, (total_patches - 1))
    patch_indices.add(ith_patch)

  gridx = int(math.ceil(self.shape.x / sample_shape.size3().x))
  bboxes = []
  for i in patch_indices:
    patch_start = Vec(i % gridx, i // gridx, 0)
    patch_start *= sample_shape.size3()
    patch_start += self.offset
    bbox = Bbox(patch_start, patch_start + sample_shape.size3())
    bbox = Bbox.clamp(bbox, dataset_bounds)
    bboxes.append(bbox)
  return bboxes

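# Worked example of the grid arithmetic above (values chosen for
# illustration): an 8192-voxel-wide volume with 2048-wide patches gives
# gridx = 4, so patch index 6 lands at grid cell (x=2, y=1).
gridx = 8192 // 2048                      # 4 columns of patches
i = 6
patch_start = (i % gridx * 2048, i // gridx * 2048, 0)
assert patch_start == (4096, 2048, 0)
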
def execute(self):
  srccv = CloudVolume(self.src_path)
  destcv = CloudVolume(self.dest_path)

  bounds = Bbox(self.offset, self.shape + self.offset)
  bounds = Bbox.clamp(bounds, srccv.bounds)

  remap = self._get_map()
  watershed_data = srccv[bounds.to_slices()]

  # Here's how the remapping works. Numpy has a special
  # indexing that can be used to perform the remap.
  # The remap array is a key:value mapping where the
  # array index is the key and the value is the contents.
  # The watershed_data array contains only data values that
  # are within the length of the remap array.
  #
  # e.g.
  #
  # remap = np.array([1,2,3]) # i.e. 0=>1, 1=>2, 2=>3
  # vals = np.array([0,1,1,1,2,0,2,1,2])
  #
  # remap[vals] # array([1, 2, 2, 2, 3, 1, 3, 2, 3])

  image = remap[watershed_data]
  downsample_and_upload(image, bounds, destcv, self.shape)

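# The comment's example above, runnable with plain NumPy (nothing assumed
# beyond the comment's own values):
import numpy as np

remap = np.array([1, 2, 3])                    # index = old label, value = new label
vals = np.array([0, 1, 1, 1, 2, 0, 2, 1, 2])   # watershed labels
print(remap[vals])                             # [1 2 2 2 3 1 3 2 3]
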
def execute(self):
  vol = CloudVolume(self.cloudpath, mip=self.mip, info=self.info, cdn_cache=False)
  bbox = Bbox.clamp(self.bounds, vol.bounds)

  path = skeldir(self.cloudpath)
  path = os.path.join(self.cloudpath, path)

  all_labels = vol[bbox.to_slices()]
  all_labels = all_labels[:, :, :, 0]

  skeletons = kimimaro.skeletonize(
    all_labels, self.teasar_params,
    object_ids=self.object_ids,
    anisotropy=vol.resolution,
    dust_threshold=1000,
    cc_safety_factor=0.25,
    progress=False,
    fix_branching=self.fix_branching,
  )

  # Translate vertices from chunk-local voxels to global physical coordinates.
  for segid, skel in six.iteritems(skeletons):
    skel.vertices[:, 0] += bbox.minpt.x * vol.resolution.x
    skel.vertices[:, 1] += bbox.minpt.y * vol.resolution.y
    skel.vertices[:, 2] += bbox.minpt.z * vol.resolution.z

  self.upload(vol, path, bbox, skeletons.values())

def execute(self):
  self._volume = CloudVolume(
    self.layer_path, self.options['mip'], bounded=False,
    parallel=self.options['parallel_download']
  )
  self._bounds = Bbox(self.offset, self.shape + self.offset)
  self._bounds = Bbox.clamp(self._bounds, self._volume.bounds)

  self._mesher = Mesher(self._volume.resolution)

  # Marching cubes loves its 1vx overlaps.
  # This avoids lines appearing between
  # adjacent chunks.
  data_bounds = self._bounds.clone()
  data_bounds.minpt -= self.options['low_padding']
  data_bounds.maxpt += self.options['high_padding']

  self._mesh_dir = None
  if self.options['mesh_dir'] is not None:
    self._mesh_dir = self.options['mesh_dir']
  elif 'mesh' in self._volume.info:
    self._mesh_dir = self._volume.info['mesh']

  if not self._mesh_dir:
    raise ValueError("The mesh destination is not present in the info file.")

  # chunk_position includes the overlap specified by low_padding/high_padding
  self._data = self._volume[data_bounds.to_slices()]
  self._remap()
  self._compute_meshes()

def generate_chunks(meta, img, offset, mip):
  shape = Vec(*img.shape)[:3]
  offset = Vec(*offset)[:3]

  bounds = Bbox(offset, shape + offset)

  alignment_check = bounds.round_to_chunk_size(
    meta.chunk_size(mip), meta.voxel_offset(mip)
  )

  if not np.all(alignment_check.minpt == bounds.minpt):
    raise AlignmentError("""
      Only chunk aligned writes are supported by this function.

      Got:             {}
      Volume Offset:   {}
      Nearest Aligned: {}
    """.format(bounds, meta.voxel_offset(mip), alignment_check))

  bounds = Bbox.clamp(bounds, meta.bounds(mip))

  img_offset = bounds.minpt - offset
  img_end = Vec.clamp(bounds.size3() + img_offset, Vec(0, 0, 0), shape)

  for startpt in xyzrange(img_offset, img_end, meta.chunk_size(mip)):
    startpt = startpt.clone()
    endpt = min2(startpt + meta.chunk_size(mip), shape)
    spt = (startpt + bounds.minpt).astype(int)
    ept = (endpt + bounds.minpt).astype(int)
    yield (startpt, endpt, spt, ept)

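# Hedged usage sketch: cut an in-memory, chunk-aligned image into pieces.
# "vol" is a hypothetical CloudVolume whose chunk grid the image aligns to;
# (startpt, endpt) index into img, while (spt, ept) are the same chunk's
# global voxel coordinates.
img = np.zeros((128, 128, 64), dtype=np.uint8)
for startpt, endpt, spt, ept in generate_chunks(vol.meta, img, vol.voxel_offset, 0):
  chunk = img[startpt.x:endpt.x, startpt.y:endpt.y, startpt.z:endpt.z]
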
def MeshSpatialIndex(
  cloudpath: str,
  shape: Tuple[int, int, int],
  offset: Tuple[int, int, int],
  mip: int = 0,
  fill_missing: bool = False,
  compress: Optional[Union[str, bool]] = 'gzip',
  mesh_dir: Optional[str] = None,
) -> None:
  """
  The main way to add a spatial index is to use the MeshTask,
  but old datasets or broken datasets may need it to be
  reconstituted. An alternative use is to create the spatial index
  over a different area size than the mesh task.
  """
  cv = CloudVolume(
    cloudpath, mip=mip,
    bounded=False, fill_missing=fill_missing
  )
  cf = CloudFiles(cloudpath)

  bounds = Bbox(Vec(*offset), Vec(*shape) + Vec(*offset))
  bounds = Bbox.clamp(bounds, cv.bounds)

  data_bounds = bounds.clone()
  data_bounds.maxpt += 1  # match typical Marching Cubes overlap

  precision = cv.mesh.spatial_index.precision
  resolution = cv.resolution

  if not mesh_dir:
    mesh_dir = cv.info["mesh"]

  # remap: old img -> img
  img, remap = cv.download(data_bounds, renumber=True)
  img = img[..., 0]
  slcs = find_objects(img)
  del img
  reverse_map = {v: k for k, v in remap.items()}  # img -> old img

  bboxes = {}
  for label, slc in enumerate(slcs):
    if slc is None:
      continue
    mesh_bounds = Bbox.from_slices(slc)
    mesh_bounds += Vec(*offset)
    mesh_bounds *= Vec(*resolution, dtype=np.float32)
    bboxes[str(reverse_map[label + 1])] = mesh_bounds.astype(resolution.dtype).to_list()

  bounds = bounds.astype(resolution.dtype) * resolution
  cf.put_json(
    f"{mesh_dir}/{bounds.to_filename(precision)}.spatial",
    bboxes,
    compress=compress,
    cache_control=False,
  )

def TransferTask(
  src_path, dest_path,
  mip, shape, offset,
  translate=(0, 0, 0),  # change of origin
  fill_missing=False,
  skip_first=False,
  skip_downsamples=False,
  delete_black_uploads=False,
  background_color=0,
  sparse=False,
  axis='z',
  agglomerate=False,
  timestamp=None,
  compress='gzip',
  factor=None,
):
  shape = Vec(*shape)
  offset = Vec(*offset)
  fill_missing = bool(fill_missing)
  translate = Vec(*translate)
  delete_black_uploads = bool(delete_black_uploads)
  sparse = bool(sparse)
  skip_first = bool(skip_first)
  skip_downsamples = bool(skip_downsamples)

  srccv = CloudVolume(
    src_path, fill_missing=fill_missing,
    mip=mip, bounded=False
  )
  destcv = CloudVolume(
    dest_path, fill_missing=fill_missing,
    mip=mip, delete_black_uploads=delete_black_uploads,
    background_color=background_color,
    compress=compress
  )

  dst_bounds = Bbox(offset, shape + offset)
  dst_bounds = Bbox.clamp(dst_bounds, destcv.bounds)
  src_bounds = dst_bounds - translate
  image = srccv.download(
    src_bounds, agglomerate=agglomerate, timestamp=timestamp
  )

  if skip_downsamples:
    destcv[dst_bounds] = image
  else:
    downsample_and_upload(
      image, dst_bounds, destcv,
      shape, mip=mip,
      skip_first=skip_first,
      sparse=sparse, axis=axis,
      factor=factor
    )

def execute(self):
  self._volume = CloudVolume(
    self.layer_path, self.options['mip'], bounded=False,
    parallel=self.options['parallel_download'],
    fill_missing=self.options['fill_missing']
  )
  self._bounds = Bbox(self.offset, self.shape + self.offset)
  self._bounds = Bbox.clamp(self._bounds, self._volume.bounds)

  self.progress = bool(self.options['progress'])

  self._mesher = zmesh.Mesher(self._volume.resolution)

  # Marching cubes loves its 1vx overlaps.
  # This avoids lines appearing between
  # adjacent chunks.
  data_bounds = self._bounds.clone()
  data_bounds.minpt -= self.options['low_padding']
  data_bounds.maxpt += self.options['high_padding']

  self._mesh_dir = self.get_mesh_dir()

  if self.options['encoding'] == 'draco':
    self.draco_encoding_settings = draco_encoding_settings(
      shape=(self.shape + self.options['low_padding'] + self.options['high_padding']),
      offset=self.offset,
      resolution=self._volume.resolution,
      compression_level=self.options["draco_compression_level"],
      create_metadata=self.options['draco_create_metadata'],
      uses_new_draco_bin_size=False,
    )

  # chunk_position includes the overlap specified by low_padding/high_padding
  # agglomerate, timestamp, stop_layer only applies to graphene volumes,
  # no-op for precomputed
  data = self._volume.download(
    data_bounds,
    agglomerate=self.options['agglomerate'],
    timestamp=self.options['timestamp'],
    stop_layer=self.options['stop_layer']
  )

  if not np.any(data):
    if self.options['spatial_index']:
      self._upload_spatial_index(self._bounds, {})
    return

  data = self._remove_dust(data, self.options['dust_threshold'])
  data = self._remap(data)

  if self.options['object_ids']:
    data = fastremap.mask_except(data, self.options['object_ids'], in_place=True)

  data, renumbermap = fastremap.renumber(data, in_place=True)
  renumbermap = {v: k for k, v in renumbermap.items()}
  self.compute_meshes(data, renumbermap)

def BlackoutTask(
  cloudpath, mip, shape, offset,
  value=0, non_aligned_writes=False
):
  shape = Vec(*shape)
  offset = Vec(*offset)
  vol = CloudVolume(cloudpath, mip, non_aligned_writes=non_aligned_writes)
  bounds = Bbox(offset, shape + offset)
  bounds = Bbox.clamp(bounds, vol.bounds)
  img = np.zeros(bounds.size3(), dtype=vol.dtype) + value
  vol[bounds] = img

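# Hedged usage sketch: overwrite a 1024 x 1024 x 64 region of a
# hypothetical layer with zeros. BlackoutTask clamps the bbox to the
# volume bounds before writing.
BlackoutTask(
  cloudpath="gs://bucket/layer",  # hypothetical path
  mip=0,
  shape=(1024, 1024, 64),
  offset=(0, 0, 0),
  value=0,
)
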
def execute(self):
  srcvol = CloudVolume(self.source_layer_path, mip=0, fill_missing=self.fill_missing)

  bounds = Bbox(self.offset, self.shape + self.offset)
  bounds = Bbox.clamp(bounds, srcvol.bounds)

  image = srcvol[bounds.to_slices()][:, :, :, :1]  # only use x affinity
  image = (image * 255.0).astype(np.uint8)

  destvol = CloudVolume(self.dest_layer_path, mip=0)
  downsample_and_upload(image, bounds, destvol, self.shape, mip=0, axis='z')

def execute(self):
  self.cv = CloudVolume(
    self.cloudpath, mip=self.mip, bounded=False,
    fill_missing=self.options['fill_missing'],
    mesh_dir=self.options['mesh_dir'],
  )

  if not self.cv.mesh.meta.is_sharded():
    raise ValueError("The mesh sharding parameter must be defined.")

  self.bounds = Bbox(self.offset, self.shape + self.offset)
  self.bounds = Bbox.clamp(self.bounds, self.cv.bounds)

  self.progress = bool(self.options['progress'])

  self.mesher = zmesh.Mesher(self.cv.resolution)

  # Marching cubes needs 1 voxel overlap to properly
  # stitch adjacent meshes.
  # data_bounds = self.bounds.clone()
  # data_bounds.maxpt += self.overlap_vx

  self.mesh_dir = self.get_mesh_dir()
  self.draco_encoding_settings = draco_encoding_settings(
    shape=(self.shape + self.overlap_vx),
    offset=self.offset,
    resolution=self.cv.resolution,
    compression_level=1,
    create_metadata=True,
    uses_new_draco_bin_size=self.cv.meta.uses_new_draco_bin_size,
  )

  chunk_pos = self.cv.meta.point_to_chunk_position(self.bounds.center(), mip=self.mip)

  img = mesh_graphene_remap.remap_segmentation(
    self.cv,
    chunk_pos.x, chunk_pos.y, chunk_pos.z,
    mip=self.mip,
    overlap_vx=self.overlap_vx,
    time_stamp=self.timestamp,
    progress=self.progress,
  )

  if not np.any(img):
    return

  self.upload_meshes(self.compute_meshes(img))

def create_blackout_tasks(
  cloudpath: str, bounds: Bbox,
  mip: int = 0, shape: ShapeType = (2048, 2048, 64),
  value: int = 0, non_aligned_writes: bool = False
):
  vol = CloudVolume(cloudpath, mip=mip)

  shape = Vec(*shape)
  bounds = Bbox.create(bounds)
  bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)

  if not non_aligned_writes:
    bounds = bounds.expand_to_chunk_size(vol.chunk_size, vol.voxel_offset)

  bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

  class BlackoutTaskIterator(FinelyDividedTaskIterator):
    def task(self, shape, offset):
      bounded_shape = min2(shape, vol.bounds.maxpt - offset)
      return partial(
        igneous.tasks.BlackoutTask,
        cloudpath=cloudpath,
        mip=mip,
        shape=shape.clone(),
        offset=offset.clone(),
        value=value,
        non_aligned_writes=non_aligned_writes,
      )

    def on_finish(self):
      vol.provenance.processing.append({
        'method': {
          'task': 'BlackoutTask',
          'cloudpath': cloudpath,
          'mip': mip,
          'non_aligned_writes': non_aligned_writes,
          'value': value,
          'shape': shape.tolist(),
          'bounds': [
            bounds.minpt.tolist(),
            bounds.maxpt.tolist(),
          ],
        },
        'by': operator_contact(),
        'date': strftime('%Y-%m-%d %H:%M %Z'),
      })

  return BlackoutTaskIterator(bounds, shape)

def create_blackout_tasks(
  cloudpath, bounds,
  mip=0, shape=(2048, 2048, 64),
  value=0, non_aligned_writes=False
):
  vol = CloudVolume(cloudpath, mip=mip)

  shape = Vec(*shape)
  bounds = Bbox.create(bounds)
  bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
  bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

  class BlackoutTaskIterator():
    def __len__(self):
      return num_tasks(bounds, shape)

    def __iter__(self):
      for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
        bounded_shape = min2(shape, vol.bounds.maxpt - startpt)
        yield igneous.tasks.BlackoutTask(
          cloudpath=cloudpath,
          mip=mip,
          shape=shape.clone(),
          offset=startpt.clone(),
          value=value,
          non_aligned_writes=non_aligned_writes,
        )

  vol.provenance.processing.append({
    'method': {
      'task': 'BlackoutTask',
      'cloudpath': cloudpath,
      'mip': mip,
      'non_aligned_writes': non_aligned_writes,
      'value': value,
      'shape': shape.tolist(),
      'bounds': [
        bounds.minpt.tolist(),
        bounds.maxpt.tolist(),
      ],
    },
    'by': OPERATOR_CONTACT,
    'date': strftime('%Y-%m-%d %H:%M %Z'),
  })

  return BlackoutTaskIterator()

def create_touch_tasks(
  self, cloudpath,
  mip=0, shape=(2048, 2048, 64),
  bounds=None
):
  vol = CloudVolume(cloudpath, mip=mip)

  shape = Vec(*shape)

  if bounds is None:
    bounds = vol.bounds.clone()

  bounds = Bbox.create(bounds)
  bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
  bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

  class TouchTaskIterator():
    def __len__(self):
      return num_tasks(bounds, shape)

    def __iter__(self):
      for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
        bounded_shape = min2(shape, vol.bounds.maxpt - startpt)
        yield igneous.tasks.TouchTask(
          cloudpath=cloudpath,
          shape=bounded_shape.clone(),
          offset=startpt.clone(),
          mip=mip,
        )

  vol.provenance.processing.append({
    'method': {
      'task': 'TouchTask',
      'mip': mip,
      'shape': shape.tolist(),
      'bounds': [
        bounds.minpt.tolist(),
        bounds.maxpt.tolist(),
      ],
    },
    'by': OPERATOR_CONTACT,
    'date': strftime('%Y-%m-%d %H:%M %Z'),
  })
  vol.commit_provenance()

  return TouchTaskIterator()

def get_bounds(vol, bounds, shape, mip, chunk_size=None):
  if bounds is None:
    bounds = vol.bounds.clone()
  else:
    bounds = Bbox.create(bounds)
    bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
    if chunk_size is not None:
      bounds = bounds.expand_to_chunk_size(chunk_size, vol.mip_voxel_offset(mip))
    bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

  print("Volume Bounds: ", vol.mip_bounds(mip))
  print("Selected ROI: ", bounds)

  return bounds

def create_connected_component_tasks(
  descpath, segpath,
  storagestr, storagedir,
  cc_thresh, sz_thresh,
  bounds, shape,
  mip=(8, 8, 40),
  parallel=1, hashmax=1
):
  shape = Vec(*shape)

  vol = CloudVolume(segpath, mip=mip)
  # bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
  bounds = Bbox.clamp(bounds, vol.bounds)

  class ConnectedComponentsTaskIterator(object):
    def __init__(self, level_start, level_end):
      self.level_start = level_start
      self.level_end = level_end

    def __len__(self):
      return self.level_end - self.level_start

    def __getitem__(self, slc):
      itr = copy.deepcopy(self)
      itr.level_start = self.level_start + slc.start
      itr.level_end = self.level_start + slc.stop
      return itr

    def __iter__(self):
      self.bounds = bounds.clone()
      self.bounds.minpt.z = bounds.minpt.z + self.level_start * shape.z
      self.bounds.maxpt.z = bounds.minpt.z + self.level_end * shape.z

      for startpt in xyzrange(self.bounds.minpt, self.bounds.maxpt, shape):
        task_shape = min2(shape.clone(), self.bounds.maxpt - startpt)
        task_bounds = Bbox(startpt, startpt + task_shape)
        if task_bounds.volume() < 1:
          continue

        chunk_begin = tup2str(task_bounds.minpt)
        chunk_end = tup2str(task_bounds.maxpt)
        mip_str = tup2str(mip)

        cmd = (
          f"chunk_ccs {descpath} {segpath} {storagestr}"
          f" {cc_thresh} {sz_thresh} --chunk_begin {chunk_begin}"
          f" --chunk_end {chunk_end} --hashmax {hashmax}"
          f" --parallel {parallel} --mip {mip_str}"
          f" --storagedir {storagedir}"
        )

        yield SynaptorTask(cmd)

  level_end = int(math.ceil(bounds.size3().z / shape.z))
  return ConnectedComponentsTaskIterator(0, level_end)

def QuantizeTask(
  source_layer_path, dest_layer_path,
  shape, offset, mip, fill_missing=False
):
  shape = Vec(*shape)
  offset = Vec(*offset)
  srcvol = CloudVolume(source_layer_path, mip=mip, fill_missing=fill_missing)

  bounds = Bbox(offset, shape + offset)
  bounds = Bbox.clamp(bounds, srcvol.bounds)

  image = srcvol[bounds.to_slices()][:, :, :, :1]  # only use x affinity
  image = (image * 255.0).astype(np.uint8)

  destvol = CloudVolume(dest_layer_path, mip=mip)
  downsample_and_upload(image, bounds, destvol, shape, mip=mip, axis='z')

def execute(self):
  vol = CloudVolume(self.layer_path, mip=self.mip)

  highres_bbox = Bbox(self.offset, self.offset + self.shape)

  top_mip = min(vol.available_mips[-1], self.mip + self.num_mips)

  for mip in range(self.mip, top_mip + 1):
    vol.mip = mip
    bbox = vol.bbox_to_mip(highres_bbox, self.mip, mip)
    bbox = bbox.round_to_chunk_size(vol.underlying, offset=vol.bounds.minpt)
    bbox = Bbox.clamp(bbox, vol.bounds)

    if bbox.volume() == 0:
      continue

    vol.delete(bbox)

def create_touch_tasks(
  self, cloudpath,
  mip=0, shape=(2048, 2048, 64),
  bounds=None
):
  vol = CloudVolume(cloudpath, mip=mip)

  shape = Vec(*shape)

  if bounds is None:
    bounds = vol.bounds.clone()

  bounds = Bbox.create(bounds)
  bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
  bounds = Bbox.clamp(bounds, vol.mip_bounds(mip))

  class TouchTaskIterator(FinelyDividedTaskIterator):
    def task(self, shape, offset):
      bounded_shape = min2(shape, vol.bounds.maxpt - offset)
      return igneous.tasks.TouchTask(
        cloudpath=cloudpath,
        shape=bounded_shape.clone(),
        offset=offset.clone(),
        mip=mip,
      )

    def on_finish(self):
      vol.provenance.processing.append({
        'method': {
          'task': 'TouchTask',
          'mip': mip,
          'shape': shape.tolist(),
          'bounds': [
            bounds.minpt.tolist(),
            bounds.maxpt.tolist(),
          ],
        },
        'by': OPERATOR_CONTACT,
        'date': strftime('%Y-%m-%d %H:%M %Z'),
      })
      vol.commit_provenance()

  return TouchTaskIterator(bounds, shape)

def generate_chunks(meta, img, offset, mip):
  shape = Vec(*img.shape)[:3]
  offset = Vec(*offset)[:3]

  bounds = Bbox(offset, shape + offset)

  alignment_check = bounds.round_to_chunk_size(
    meta.chunk_size(mip), meta.voxel_offset(mip)
  )

  if not np.all(alignment_check.minpt == bounds.minpt):
    raise AlignmentError(f"""
      Only chunk aligned writes are supported by this function.

      Got:             {bounds}
      Volume Offset:   {meta.voxel_offset(mip)}
      Nearest Aligned: {alignment_check}
    """)

  bounds = Bbox.clamp(bounds, meta.bounds(mip))

  img_offset = bounds.minpt - offset
  img_end = Vec.clamp(bounds.size3() + img_offset, Vec(0, 0, 0), shape)

  class ChunkIterator():
    def __len__(self):
      csize = meta.chunk_size(mip)
      bbox = Bbox(img_offset, img_end)
      # round up and avoid conversion to float
      n_chunks = (bbox.dx + csize[0] - 1) // csize[0]
      n_chunks *= (bbox.dy + csize[1] - 1) // csize[1]
      n_chunks *= (bbox.dz + csize[2] - 1) // csize[2]
      return n_chunks

    def __iter__(self):
      for startpt in xyzrange(img_offset, img_end, meta.chunk_size(mip)):
        startpt = startpt.clone()
        endpt = min2(startpt + meta.chunk_size(mip), shape)
        spt = (startpt + bounds.minpt).astype(int)
        ept = (endpt + bounds.minpt).astype(int)
        yield (startpt, endpt, spt, ept)

  return ChunkIterator()

def DeleteTask(layer_path: str, shape, offset, mip=0, num_mips=5):
  """Delete a block of images inside a layer on all mip levels."""
  shape = Vec(*shape)
  offset = Vec(*offset)
  vol = CloudVolume(layer_path, mip=mip, max_redirects=0)

  highres_bbox = Bbox(offset, offset + shape)

  top_mip = min(vol.available_mips[-1], mip + num_mips)

  for mip_i in range(mip, top_mip + 1):
    vol.mip = mip_i
    bbox = vol.bbox_to_mip(highres_bbox, mip, mip_i)
    bbox = bbox.round_to_chunk_size(vol.chunk_size, offset=vol.bounds.minpt)
    bbox = Bbox.clamp(bbox, vol.bounds)

    if bbox.volume() == 0:
      continue

    vol.delete(bbox)

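# Hedged usage sketch: delete a block and its downsamples from a
# hypothetical layer. DeleteTask snaps each mip's bbox to the chunk grid
# before deleting, so only chunk-aligned regions are removed.
DeleteTask(
  layer_path="gs://bucket/layer",  # hypothetical path
  shape=(2048, 2048, 64),
  offset=(0, 0, 0),
  mip=0,
  num_mips=5,
)
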
def execute(self):
  srccv = CloudVolume(self.src_path, mip=self.mip, fill_missing=True)

  # Accumulate a histogram of the luminance levels
  nbits = np.dtype(srccv.dtype).itemsize * 8
  levels = np.zeros(shape=(2 ** nbits,), dtype=np.uint64)

  bounds = Bbox(self.offset, self.shape[:3] + self.offset)
  bounds = Bbox.clamp(bounds, srccv.bounds)

  bboxes = self.select_bounding_boxes(bounds)
  for bbox in bboxes:
    img2d = srccv[bbox.to_slices()].reshape((bbox.volume()))
    cts = np.bincount(img2d)
    levels[0:len(cts)] += cts.astype(np.uint64)

  covered_area = sum([bbx.volume() for bbx in bboxes])

  bboxes = [(bbox.volume(), bbox.size3()) for bbox in bboxes]
  bboxes.sort(key=lambda x: x[0])
  biggest = bboxes[-1][1]

  output = {
    "levels": levels.tolist(),
    "patch_size": biggest.tolist(),
    "num_patches": len(bboxes),
    "coverage_ratio": covered_area / self.shape.rectVolume(),
  }

  path = self.levels_path if self.levels_path else self.src_path
  path = os.path.join(path, 'levels')

  cf = CloudFiles(path)
  cf.put_json(
    path="{}/{}".format(self.mip, self.offset.z),
    content=output,
    cache_control='no-cache',
  )

def create_transfer_tasks(
  src_layer_path, dest_layer_path,
  chunk_size=None, shape=Vec(2048, 2048, 64),
  fill_missing=False, translate=(0, 0, 0),
  bounds=None, mip=0, preserve_chunk_size=True,
  encoding=None
):
  """
  Transfer data from one data layer to another. It's possible
  to transfer from a lower resolution mip level within a given
  bounding box. The bounding box should be specified in terms of
  the highest resolution.
  """
  shape = Vec(*shape)
  vol = CloudVolume(src_layer_path, mip=mip)
  translate = Vec(*translate) // vol.downsample_ratio

  if not chunk_size:
    chunk_size = vol.info['scales'][mip]['chunk_sizes'][0]
  chunk_size = Vec(*chunk_size)

  try:
    dvol = CloudVolume(dest_layer_path, mip=mip)
  except Exception:  # no info file
    info = copy.deepcopy(vol.info)
    dvol = CloudVolume(dest_layer_path, info=info)
    dvol.commit_info()

  if encoding is not None:
    dvol.info['scales'][mip]['encoding'] = encoding
  dvol.info['scales'] = dvol.info['scales'][:mip + 1]
  dvol.info['scales'][mip]['chunk_sizes'] = [chunk_size.tolist()]
  dvol.commit_info()

  create_downsample_scales(
    dest_layer_path, mip=mip, ds_shape=shape,
    preserve_chunk_size=preserve_chunk_size, encoding=encoding
  )

  if bounds is None:
    bounds = vol.bounds.clone()
  else:
    bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)
    bounds = Bbox.clamp(bounds, dvol.bounds)

  dvol_bounds = dvol.mip_bounds(mip).clone()

  class TransferTaskIterator(object):
    def __len__(self):
      return int(reduce(operator.mul, np.ceil(bounds.size3() / shape)))

    def __iter__(self):
      for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
        task_shape = min2(shape.clone(), dvol_bounds.maxpt - startpt)
        yield TransferTask(
          src_path=src_layer_path,
          dest_path=dest_layer_path,
          shape=task_shape,
          offset=startpt.clone(),
          fill_missing=fill_missing,
          translate=translate,
          mip=mip,
        )

  job_details = {
    'method': {
      'task': 'TransferTask',
      'src': src_layer_path,
      'dest': dest_layer_path,
      'shape': list(map(int, shape)),
      'fill_missing': fill_missing,
      'translate': list(map(int, translate)),
      'bounds': [bounds.minpt.tolist(), bounds.maxpt.tolist()],
      'mip': mip,
    },
    'by': OPERATOR_CONTACT,
    'date': strftime('%Y-%m-%d %H:%M %Z'),
  }

  dvol = CloudVolume(dest_layer_path)
  dvol.provenance.sources = [src_layer_path]
  dvol.provenance.processing.append(job_details)
  dvol.commit_provenance()

  if vol.path.protocol != 'boss':
    vol.provenance.processing.append(job_details)
    vol.commit_provenance()

  return TransferTaskIterator()

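# Hedged usage sketch: generate the task iterator and run it locally.
# LocalTaskQueue is from the seung-lab python-task-queue package; the
# layer paths are hypothetical.
from taskqueue import LocalTaskQueue

tasks = create_transfer_tasks(
  "gs://bucket/src-layer", "gs://bucket/dest-layer",
  mip=0, fill_missing=True,
)
tq = LocalTaskQueue(parallel=4)
tq.insert(tasks)
tq.execute()
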