Code example #1
    def _store_block_data(self, block_roi, block_data):
        """
        Copy block_data and store it into the cache.
        The block_lock is not obtained here, so lock it before you call this.
        """
        with self._lock:
            if self.CompressionEnabled.value and numpy.dtype(
                    block_data.dtype) in [
                        numpy.dtype(numpy.uint8),
                        numpy.dtype(numpy.uint32),
                        numpy.dtype(numpy.float32)
                    ]:
                compressed_block = vigra.ChunkedArrayCompressed(
                    block_data.shape, vigra.Compression.LZ4, block_data.dtype)
                compressed_block[:] = block_data
                block_storage_data = compressed_block
            else:
                block_storage_data = block_data.copy()

            # Store the data.
            # First double-check that the block wasn't removed from the
            #   cache while we were requesting it.
            # (Could have happened via propagateDirty() or eventually the arrayCacheMemoryMgr)
            if block_roi in self._block_locks:
                self._block_data[block_roi] = block_storage_data

        self._last_access_times[block_roi] = time.time()
Code example #2
def save_debug_image(name, image, out_debug_image_dict):
    """
    If out_debug_image_dict isn't None, save the
    given image in the dict as a compressed array.
    """
    if out_debug_image_dict is None:
        return

    # Preserve axistags when the image carries them (e.g. a VigraArray).
    axistags = getattr(image, 'axistags', None)

    debug_copy = vigra.ChunkedArrayCompressed(
        image.shape, dtype=image.dtype, axistags=axistags)
    out_debug_image_dict[name] = debug_copy
    out_debug_image_dict[name][:] = image
Code example #3
    def _setDefaultInternals(self):
        """
        (Re)initialize all internal data structures:
        chunk geometry, background values, the compressed label cache,
        global-label bookkeeping, and the per-chunk locks.
        """
        # chunk array shape calculation
        shape = self._Input.meta.shape
        if self.ChunkShape.ready():
            chunkShape = (1, ) + self.ChunkShape.value + (1, )
        elif self._Input.meta.ideal_blockshape is not None and\
                np.prod(self._Input.meta.ideal_blockshape) > 0:
            chunkShape = self._Input.meta.ideal_blockshape
        else:
            chunkShape = self._automaticChunkShape(self._Input.meta.shape)
        assert len(shape) == len(chunkShape),\
            "Encountered an invalid chunkShape"
        chunkShape = np.minimum(shape, chunkShape)
        # number of chunks per axis: ceil(shape / chunkShape)
        self._chunkArrayShape = tuple(
            (s + c - 1) // c for s, c in zip(shape, chunkShape))
        # NOTE: np.int / np.bool were removed in NumPy 1.24; the builtins
        # give the identical dtypes (platform int / bool_).
        self._chunkShape = np.asarray(chunkShape, dtype=int)
        self._shape = shape

        # determine the background values
        # one background value per (t, c) pair
        self._background = np.zeros((shape[0], shape[4]),
                                    dtype=self.Input.meta.dtype)
        if self.Background.ready():
            bg = self.Background[...].wait()
            bg = vigra.taggedView(bg, axistags="txyzc").withAxes('t', 'c')
            # we might have an old value set for the background value
            # ignore it until it is configured correctly, or execute is called
            if bg.size > 1 and \
                    (shape[0] != bg.shape[0] or shape[4] != bg.shape[1]):
                self._background_valid = False
            else:
                self._background_valid = True
                self._background[:] = bg
        else:
            self._background_valid = True

        # manager object
        self._manager = _LabelManager()

        ### local labels ###
        # cache for local labels
        # adjust cache chunk shape to our chunk shape
        cs = tuple(map(_get_next_power, self._chunkShape))
        logger.debug("Creating cache with chunk shape {}".format(cs))
        self._cache = vigra.ChunkedArrayCompressed(shape,
                                                   dtype=_LABEL_TYPE,
                                                   chunk_shape=cs)

        ### global indices ###
        # offset (global labels - local labels) per chunk
        self._globalLabelOffset = np.ones(self._chunkArrayShape,
                                          dtype=_LABEL_TYPE)
        # keep track of number of indices in chunk (-1 == not labeled yet)
        self._numIndices = -np.ones(self._chunkArrayShape, dtype=np.int32)

        # union find data structure, tells us for every global index to which
        # label it belongs
        self._uf = UnionFindArray(_LABEL_TYPE(1))

        ### global labels ###
        # keep track of assigned global labels
        gen = partial(InfiniteLabelIterator, 1, dtype=_LABEL_TYPE)
        self._labelIterators = defaultdict(gen)
        self._globalToFinal = defaultdict(dict)
        self._isFinal = np.zeros(self._chunkArrayShape, dtype=bool)

        ### algorithmic ###

        # keep track of merged regions
        self._mergeMap = defaultdict(list)

        # locks that keep threads from changing a specific chunk
        self._chunk_locks = defaultdict(HardLock)
Code example #4
    def execute(self, slot, subindex, roi, result):
        """
        Fill ``result`` with the data for ``roi``.

        Serves the request from an already-stored block when the roi fits
        entirely inside one.  Otherwise fetches the data from Input and
        (unless the upstream operator set ``meta.dontcache``) stores it,
        LZ4-compressed via vigra for uint8/uint32/float32 dtypes when
        CompressionEnabled, under the standardized roi key.
        """
        with self._lock:
            # Does this roi happen to fit ENTIRELY within an existing stored block?
            outer_rois = containing_rois(self._block_data.keys(),
                                         (roi.start, roi.stop))
            if len(outer_rois) > 0:
                # Use the first one we found
                block_roi = self._standardize_roi(*outer_rois[0])
                block_relative_roi = numpy.array(
                    (roi.start, roi.stop)) - block_roi[0]
                self.Output.stype.copy_data(
                    result, self._block_data[block_roi][roiToSlice(
                        *block_relative_roi)])
                return

        # Standardize roi for usage as dict key
        block_roi = self._standardize_roi(roi.start, roi.stop)

        # Get lock for this block (create first if necessary)
        with self._lock:
            if block_roi not in self._block_locks:
                self._block_locks[block_roi] = RequestLock()
            block_lock = self._block_locks[block_roi]

        # Handle identical simultaneous requests
        with block_lock:
            try:
                # Extra [:] here is in case we are decompressing from a chunkedarray
                self.Output.stype.copy_data(result,
                                            self._block_data[block_roi][:])
                return
            except KeyError:  # Not yet stored: Request it now.

                # We attach a special attribute to the array to allow the upstream operator
                #  to optionally tell us not to bother caching the data.
                self.Input(roi.start, roi.stop).writeInto(result).block()

                if self.Input.meta.dontcache:
                    # The upstream operator says not to bother caching the data.
                    # (For example, see OpCacheFixer.)
                    return

                if self.CompressionEnabled.value and numpy.dtype(
                        result.dtype) in [
                            numpy.dtype(numpy.uint8),
                            numpy.dtype(numpy.uint32),
                            numpy.dtype(numpy.float32)
                        ]:
                    compressed_block = vigra.ChunkedArrayCompressed(
                        result.shape, vigra.Compression.LZ4, result.dtype)
                    compressed_block[:] = result
                    block_storage_data = compressed_block
                else:
                    # Dtype not supported by the compressor (or compression
                    # disabled): store an uncompressed copy.
                    block_storage_data = result.copy()

                with self._lock:
                    # Store the data.
                    # First double-check that the block wasn't removed from the
                    #   cache while we were requesting it.
                    # (Could have happened via propagateDirty() or eventually the arrayCacheMemoryMgr)
                    if block_roi in self._block_locks:
                        self._block_data[block_roi] = block_storage_data
            # Record the access time only on the store path (the cache-hit
            # branches above return before reaching this line).
            self._last_access_times[block_roi] = time.time()