def execute(self, slot, subindex, roi, result):
    """
    Satisfy the requested roi by issuing one upstream request per intersecting block.

    Blocks that coincide exactly with their clipped portion are written straight
    into ``result``; partially-overlapping blocks are fetched whole and the relevant
    sub-region is copied out in a completion callback.
    """
    requested = (roi.start, roi.stop)
    # Rois trimmed to the requested region, one per intersecting block.
    clipped_rois = getIntersectingRois(self.Input.meta.shape, self.BlockShape.value, requested, True)
    # Optionally request entire (unclipped) blocks instead of just the needed parts.
    if self._always_request_full_blocks:
        request_rois = getIntersectingRois(self.Input.meta.shape, self.BlockShape.value, requested, False)
    else:
        request_rois = clipped_rois

    pool = RequestPool()
    for request_roi, clipped_roi in zip(request_rois, clipped_rois):
        request_roi = numpy.asarray(request_roi)
        clipped_roi = numpy.asarray(clipped_roi)

        req = self.Input(*request_roi)
        # Where this block's (clipped) data lands inside the caller's result array.
        destination_roi = clipped_roi - roi.start
        if (request_roi == clipped_roi).all():
            # Requested exactly what we need: stream directly into the result.
            req.writeInto(result[roiToSlice(*destination_roi)])
        else:
            # Requested a full block: copy just the clipped sub-region when it arrives.
            within_block = clipped_roi - request_roi[0]

            def _copy_subregion(dest_roi, src_roi, request_result):
                # Runs as the request's completion callback.
                self.Output.stype.copy_data(
                    result[roiToSlice(*dest_roi)],
                    request_result[roiToSlice(*src_roi)],
                )

            req.notify_finished(partial(_copy_subregion, destination_roi, within_block))
        pool.add(req)
        # Drop our reference so the pool owns the request's lifetime.
        del req
    pool.wait()
def execute(self, slot, subindex, roi, result):
    """
    Fulfill the requested roi block-by-block via parallel upstream requests.

    Each intersecting block is requested from ``self.Input``; blocks that exactly
    match their clipped roi are written directly into ``result``, while larger
    (full-block) requests copy only the needed sub-region on completion.
    """
    # Rois of the intersecting blocks, clipped to the requested roi.
    clipped_block_rois = getIntersectingRois(
        self.Input.meta.shape, self.BlockShape.value, (roi.start, roi.stop), True
    )
    if self._always_request_full_blocks:
        # Request whole blocks even when only part of each block is needed.
        full_block_rois = getIntersectingRois(
            self.Input.meta.shape, self.BlockShape.value, (roi.start, roi.stop), False
        )
    else:
        full_block_rois = clipped_block_rois

    pool = RequestPool()
    for full_block_roi, clipped_block_roi in zip(full_block_rois, clipped_block_rois):
        full_block_roi = numpy.asarray(full_block_roi)
        clipped_block_roi = numpy.asarray(clipped_block_roi)

        req = self.Input(*full_block_roi)
        # Location of this block's data within the caller's result array.
        output_roi = numpy.asarray(clipped_block_roi) - roi.start
        if (full_block_roi == clipped_block_roi).all():
            # Exact match: the request can write straight into the result.
            req.writeInto(result[roiToSlice(*output_roi)])
        else:
            # Full block requested: copy only the clipped portion when done.
            roi_within_block = clipped_block_roi - full_block_roi[0]

            def copy_request_result(output_roi, roi_within_block, request_result):
                # Completion callback: extract the needed sub-region into result.
                self.Output.stype.copy_data(
                    result[roiToSlice(*output_roi)], request_result[roiToSlice(*roi_within_block)]
                )

            req.notify_finished(partial(copy_request_result, output_roi, roi_within_block))
        pool.add(req)
        # Release our handle; the pool keeps the request alive until it finishes.
        del req
    pool.wait()
def _execute_Output(self, slot, subindex, roi, result):
    """
    Overridden from OpUnblockedArrayCache

    Fetch the requested roi block-by-block, either through the cache or
    (in bypass mode) straight from the input.
    """

    def copy_block(full_block_roi, clipped_block_roi):
        # Fetch one block's worth of data into the correct slice of `result`.
        full_block_roi = numpy.asarray(full_block_roi)
        clipped_block_roi = numpy.asarray(clipped_block_roi)
        output_roi = numpy.asarray(clipped_block_roi) - roi.start
        block_roi = self._get_containing_block_roi(clipped_block_roi)

        # Skip cache and copy full block directly
        if self.BypassModeEnabled.value:
            full_block_data = self.Output.stype.allocateDestination(SubRegion(self.Output, *full_block_roi))
            self.Input(*full_block_roi).writeInto(full_block_data).block()
            roi_within_block = clipped_block_roi - full_block_roi[0]
            self.Output.stype.copy_data(
                result[roiToSlice(*output_roi)], full_block_data[roiToSlice(*roi_within_block)]
            )
        # If the data exists already or we can just fetch it without needing extra scratch space,
        # just call the base class
        elif block_roi is not None or (full_block_roi == clipped_block_roi).all():
            self._execute_Output_impl(clipped_block_roi, result[roiToSlice(*output_roi)])
        elif self.Input.meta.dontcache:
            # Data isn't in the cache, but we don't need it in the cache anyway.
            self.Input(*clipped_block_roi).writeInto(result[roiToSlice(*output_roi)]).block()
        else:
            # Data doesn't exist yet in the cache.
            # Request the full block, but then discard the parts we don't need.
            # (We use allocateDestination() here to support MaskedArray types.)
            # TODO: We should probably just get rid of MaskedArray support altogether...
            full_block_data = self.Output.stype.allocateDestination(SubRegion(self.Output, *full_block_roi))
            self._execute_Output_impl(full_block_roi, full_block_data)
            roi_within_block = clipped_block_roi - full_block_roi[0]
            self.Output.stype.copy_data(
                result[roiToSlice(*output_roi)], full_block_data[roiToSlice(*roi_within_block)]
            )

    # Block rois clipped to the requested roi, and their full (unclipped) counterparts.
    clipped_block_rois = getIntersectingRois(self.Input.meta.shape, self._blockshape, (roi.start, roi.stop), True)
    full_block_rois = getIntersectingRois(self.Input.meta.shape, self._blockshape, (roi.start, roi.stop), False)

    # Copy each block in parallel.
    pool = RequestPool()
    for full_block_roi, clipped_block_roi in zip(full_block_rois, clipped_block_rois):
        req = Request(partial(copy_block, full_block_roi, clipped_block_roi))
        pool.add(req)
    pool.wait()
def _execute_Output(self, slot, subindex, roi, result):
    """
    Overridden from OpUnblockedArrayCache

    Fetch the requested roi block-by-block, fulfilling each block from the
    cache when possible and otherwise from the input.
    """

    def copy_block(full_block_roi, clipped_block_roi):
        # Fetch one block's worth of data into the correct slice of `result`.
        full_block_roi = numpy.asarray(full_block_roi)
        clipped_block_roi = numpy.asarray(clipped_block_roi)
        output_roi = numpy.asarray(clipped_block_roi) - roi.start

        # If the data exists already or we can just fetch it without needing extra scratch space,
        # just call the base class
        block_roi = self._get_containing_block_roi(clipped_block_roi)
        if block_roi is not None or (full_block_roi == clipped_block_roi).all():
            self._execute_Output_impl(clipped_block_roi, result[roiToSlice(*output_roi)])
        elif self.Input.meta.dontcache:
            # Data isn't in the cache, but we don't need it in the cache anyway.
            self.Input(*clipped_block_roi).writeInto(
                result[roiToSlice(*output_roi)]).block()
        else:
            # Data doesn't exist yet in the cache.
            # Request the full block, but then discard the parts we don't need.
            # (We use allocateDestination() here to support MaskedArray types.)
            # TODO: We should probably just get rid of MaskedArray support altogether...
            full_block_data = self.Output.stype.allocateDestination(
                SubRegion(self.Output, *full_block_roi))
            self._execute_Output_impl(full_block_roi, full_block_data)
            roi_within_block = clipped_block_roi - full_block_roi[0]
            self.Output.stype.copy_data(
                result[roiToSlice(*output_roi)],
                full_block_data[roiToSlice(*roi_within_block)])

    # Block rois clipped to the requested roi, and their full (unclipped) counterparts.
    clipped_block_rois = getIntersectingRois(self.Input.meta.shape, self._blockshape, (roi.start, roi.stop), True)
    full_block_rois = getIntersectingRois(self.Input.meta.shape, self._blockshape, (roi.start, roi.stop), False)

    # Copy each block in parallel.
    pool = RequestPool()
    for full_block_roi, clipped_block_roi in zip(full_block_rois, clipped_block_rois):
        req = Request(
            partial(copy_block, full_block_roi, clipped_block_roi))
        pool.add(req)
    pool.wait()
def _setInSlotInput(self, slot, subindex, roi, new_pixels):
    """
    Since this is a label array, inserting pixels has a special meaning:
    We only overwrite the new non-zero pixels. In the new data, zeros mean "don't change".

    So, here's what each pixel we're adding means:
    0: don't change
    1: change to 1
    2: change to 2
    ...
    N: change to N
    eraser_magic_value: change to 0

    Returns the maximum label value written (internal use).
    """
    if isinstance(new_pixels, vigra.VigraArray):
        # Work with a plain ndarray view; axistags aren't needed here.
        new_pixels = new_pixels.view(numpy.ndarray)

    # Get logical blocking.
    block_rois = getIntersectingRois(self.Output.meta.shape, self._blockshape, (roi.start, roi.stop))

    # Convert to tuples
    block_rois = [(tuple(start), tuple(stop)) for start, stop in block_rois]

    max_label = 0
    for block_roi in block_rois:
        # Portion of `new_pixels` that corresponds to this block.
        roi_within_data = numpy.array(block_roi) - roi.start
        new_block_pixels = new_pixels[roiToSlice(*roi_within_data)]

        # Shortcut: Nothing to change if this block is all zeros.
        if not new_block_pixels.any():
            continue

        block_slot_roi = SubRegion(self.Output, *block_roi)

        # Extract the data to modify
        original_block_data = self.Output.stype.allocateDestination(block_slot_roi)
        self.execute(self.Output, (), block_slot_roi, original_block_data)

        # Reset the pixels we need to change (so we can use |= below)
        original_block_data[new_block_pixels.nonzero()] = 0

        # Update
        original_block_data |= new_block_pixels

        # Replace 'eraser' values with zeros.
        cleaned_block_data = original_block_data.copy()
        cleaned_block_data[original_block_data == self._eraser_magic_value] = 0

        # Set in the cache (our superclass).
        super(OpCompressedUserLabelArray, self)._setInSlotInput(
            slot, subindex, block_slot_roi, cleaned_block_data, store_zero_blocks=False
        )

        max_label = max(max_label, cleaned_block_data.max())

        # We could wait to send out one big dirty notification (instead of one per block),
        # but that might result in a lot of unnecessarily dirty pixels in cases when the
        # new_pixels were mostly empty (such as when importing labels from disk).
        # That's bad for downstream operators like OpFeatureMatrixCache.
        # So instead, we only send notifications for the blocks that were touched.
        # During labeling, it makes no difference.
        # During project import, this is slightly worse.
        # But during label import from disk, this is very important.
        # FIXME: Shouldn't this notification be triggered from within OpUnmanagedCompressedCache?
        self.Output.setDirty(*block_roi)

    return max_label  # Internal use: Return max label