def copy_block( full_block_roi, clipped_block_roi ):
            full_block_roi = numpy.asarray(full_block_roi)
            clipped_block_roi = numpy.asarray(clipped_block_roi)
            output_roi = clipped_block_roi - roi.start

            # If the data exists already or we can just fetch it without needing extra scratch space,
            # just call the base class
            block_roi = self._get_containing_block_roi( clipped_block_roi )
            if block_roi is not None or (full_block_roi == clipped_block_roi).all():
                self._execute_Output_impl( clipped_block_roi, result[roiToSlice(*output_roi)] )
            elif self.Input.meta.dontcache:
                # Data isn't in the cache, but we don't need it in the cache anyway.
                self.Input(*clipped_block_roi).writeInto(result[roiToSlice(*output_roi)]).block()
            else:
                # Data doesn't exist yet in the cache.
                # Request the full block, but then discard the parts we don't need.
                
                # (We use allocateDestination() here to support MaskedArray types.)
                # TODO: We should probably just get rid of MaskedArray support altogether...
                full_block_data = self.Output.stype.allocateDestination( SubRegion(self.Output, *full_block_roi ) )
                self._execute_Output_impl( full_block_roi, full_block_data )
    
                roi_within_block = clipped_block_roi - full_block_roi[0]
                self.Output.stype.copy_data( result[roiToSlice(*output_roi)],
                                             full_block_data[roiToSlice(*roi_within_block)] )
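
Nearly every snippet on this page leans on the same two ROI helpers. The sketch below shows their assumed semantics (the real implementations live in lazyflow.roi and return lazyflow's own vector types; this simplified version only illustrates the start/stop round-trip):

import numpy

def roi_to_slice(start, stop):
    # Sketch only: turn a (start, stop) ROI pair into a tuple of slice objects.
    return tuple(slice(int(a), int(b)) for a, b in zip(start, stop))

def slice_to_roi(slicing, shape):
    # Sketch only: turn a slicing back into (start, stop) arrays,
    # filling open-ended slices from the given shape.
    start = numpy.array([0 if s.start is None else s.start for s in slicing])
    stop = numpy.array([d if s.stop is None else s.stop for s, d in zip(slicing, shape)])
    return start, stop

# Round-trip check:
# slice_to_roi((slice(2, 5), slice(None)), (10, 20))  ->  ([2, 0], [5, 20])
# roi_to_slice([2, 0], [5, 20])                        ->  (slice(2, 5), slice(0, 20))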
Example #2
        def setInSlot(self, slot, key, value):
            start, stop = sliceToRoi(key, self.shape)
            
            blockStart = (1.0 * start / self._blockShape).floor()
            blockStop = (1.0 * stop / self._blockShape).ceil()
            blockStop = numpy.where(stop == self.shape, self._dirtyShape, blockStop)
            blockKey = roiToSlice(blockStart,blockStop)
            innerBlocks = self._blockNumbers[blockKey]
            for b_ind in innerBlocks.ravel():

                offset = self._blockShape*self._flatBlockIndices[b_ind]
                bigstart = numpy.maximum(offset, start)
                bigstop = numpy.minimum(offset + self._blockShape, stop)
                smallstart = bigstart-offset
                smallstop = bigstop - offset
                bigkey = roiToSlice(bigstart-start, bigstop-start)
                smallkey = roiToSlice(smallstart, smallstop)
                if not b_ind in self._labelers:
                    self._labelers[b_ind]=OpSparseLabelArray(self)
                    self._labelers[b_ind].inputs["shape"].setValue(self._blockShape)
                    self._labelers[b_ind].inputs["eraser"].setValue(self.inputs["eraser"].value)
                    self._labelers[b_ind].inputs["deleteLabel"].setValue(self.inputs["deleteLabel"])
                    
                self._labelers[b_ind].inputs["Input"][smallkey] = value[tuple(bigkey)].squeeze()
            
            self.outputs["Output"].setDirty(key)
    def _copyData(self, roi, destination, block_starts):
        """
        Copy data from each block into the destination array.
        For blocks that aren't currently stored, just write zeros.
        """
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        block_starts = map( tuple, block_starts )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            destination_relative_intersection_slicing = roiToSlice(*destination_relative_intersection)
            block_relative_intersection_slicing = roiToSlice(*block_relative_intersection)

            if block_start in self._cacheFiles:
                # Copy from block to destination
                dataset = self._getBlockDataset( entire_block_roi )

                if self.Output.meta.has_mask:
                    destination[ destination_relative_intersection_slicing ] = dataset["data"][ block_relative_intersection_slicing ]
                    destination.mask[ destination_relative_intersection_slicing ] = dataset["mask"][ block_relative_intersection_slicing ]
                    destination.fill_value = dataset["fill_value"][()]
                else:
                    destination[ destination_relative_intersection_slicing ] = dataset[ block_relative_intersection_slicing ]
            else:
                # Not stored yet.  Overwrite with zeros.
                destination[ destination_relative_intersection_slicing ] = 0
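
The blockwise caches above and below all rely on getBlockBounds and getIntersection. A rough sketch of what those helpers are assumed to compute (the real lazyflow versions also validate their inputs):

import numpy

def get_block_bounds(shape, blockshape, block_start):
    # Assumed behavior: the block's roi, clipped to the dataset shape.
    block_start = numpy.asarray(block_start)
    block_stop = numpy.minimum(block_start + blockshape, shape)
    return numpy.array([block_start, block_stop])

def get_intersection(roi_a, roi_b):
    # Assumed behavior: elementwise overlap of two (start, stop) rois.
    start = numpy.maximum(roi_a[0], roi_b[0])
    stop = numpy.minimum(roi_a[1], roi_b[1])
    return numpy.array([start, stop])

# e.g. a block starting at (20, 20) with blockshape (10, 10) in a (25, 25) volume
# is clipped to [(20, 20), (25, 25)]; its overlap with a request for
# [(0, 0), (22, 30)] is [(20, 20), (22, 25)].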
Example #4
        def setInSlot(self, slot, key, value):
            shape = self.inputs["shape"].value
            eraseLabel = self.inputs["eraser"].value
            neutralElement = 0
    
            self.lock.acquire()
            #fix slicing of single dimensions:            
            start, stop = sliceToRoi(key, shape, extendSingleton = False)
            start = start.floor()
            stop = stop.floor()
            
            tempKey = roiToSlice(start-start, stop-start, hardBind = True)
            
            stop += numpy.where(stop-start == 0,1,0)

            key = roiToSlice(start,stop)

            updateShape = tuple(stop-start)
    
            update = self._denseArray[key].copy()
            
            update[tempKey] = value

            startRavel = numpy.ravel_multi_index(numpy.array(start, numpy.int32),shape)
            
            #insert values into dict
            updateNZ = numpy.nonzero(numpy.where(update != neutralElement,1,0))
            updateNZRavelSmall = numpy.ravel_multi_index(updateNZ, updateShape)
            
            if isinstance(value, numpy.ndarray):
                valuesNZ = value.ravel()[updateNZRavelSmall]
            else:
                valuesNZ = value
    
            updateNZRavel = numpy.ravel_multi_index(updateNZ, shape)
            updateNZRavel += startRavel        
    
            self._denseArray.ravel()[updateNZRavel] = valuesNZ
            # Read the values back out so they take on the dense array's dtype
            # before being inserted into the sparse dict below.
            valuesNZ = self._denseArray.ravel()[updateNZRavel]
    
            
            td = blist.sorteddict(zip(updateNZRavel.tolist(),valuesNZ.tolist()))
       
            self._sparseNZ.update(td)
            
            #remove values to be deleted
            updateNZ = numpy.nonzero(numpy.where(update == eraseLabel,1,0))
            if updateNZ[0].size > 0:
                updateNZRavel = numpy.ravel_multi_index(updateNZ, shape)
                updateNZRavel += startRavel    
                self._denseArray.ravel()[updateNZRavel] = neutralElement
                for index in updateNZRavel:
                    self._sparseNZ.pop(index)
            
            self.lock.release()
            
            self.outputs["Output"].setDirty(key)
Example #5
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start,roi.stop)

        #subregion start and stop
        start = self.inputs["region_start"].value
        stop = self.inputs["region_stop"].value

        #get start and stop coordinates
        ostart, ostop = sliceToRoi(key, self.shape)
        #calculate the new reading start and stop coordinates
        rstart = start + ostart
        rstop  = start + ostop
        #create reading key
        rkey = roiToSlice(rstart,rstop)

        #write the subregion to the output
        #self.inputs["Input"][rkey] returns a "GetItemWriterObject"; its
        #"writeInto" method calls the "fireRequest" method of (in this case)
        #the Input slot, which returns a "GetItemRequestObject".  While that
        #object is being created, the graph's "putTask" method is called.
        req = self.inputs["Input"][rkey].writeInto(result)
        res = req()
        return res
Example #6
    def setInSlot(self, slot, subindex, roi, value):
        assert slot == self.inputs["Input"]
        ch = self._cacheHits
        ch += 1
        self._cacheHits = ch
        start, stop = roi.start, roi.stop
        blockStart = numpy.ceil(1.0 * start / self._blockShape)
        blockStop = numpy.floor(1.0 * stop / self._blockShape)
        blockStop = numpy.where(stop == self.Output.meta.shape,
                                self._dirtyShape, blockStop)
        blockKey = roiToSlice(blockStart, blockStop)

        if (self._blockState[blockKey] != OpArrayCache.CLEAN).any():
            start2 = blockStart * self._blockShape
            stop2 = blockStop * self._blockShape
            stop2 = numpy.minimum(stop2, self.Output.meta.shape)
            key2 = roiToSlice(start2, stop2)
            with self._lock:
                if self._cache is None:
                    self._allocateCache()
                self.Output.stype.copy_data(
                    self._cache[key2], value[roiToSlice(
                        start2 - start, stop2 - start)])
                self._blockState[blockKey] = self._dirtyState
                self._blockQuery[blockKey] = None
Example #7
    def setInSlot(self, slot, key, value):
        if slot == self.inputs["Input"]:
            ch = self._cacheHits
            ch += 1
            self._cacheHits = ch
            start, stop = sliceToRoi(key, self.shape)
            blockStart = numpy.ceil(1.0 * start / self._blockShape)
            blockStop = numpy.floor(1.0 * stop / self._blockShape)
            blockStop = numpy.where(stop == self.shape, self._dirtyShape, blockStop)
            blockKey = roiToSlice(blockStart, blockStop)

            if (self._blockState[blockKey] != 2).any():
                start2 = blockStart * self._blockShape
                stop2 = blockStop * self._blockShape
                stop2 = numpy.minimum(stop2, self.shape)
                key2 = roiToSlice(start2, stop2)
                self._lock.acquire()
                if self._cache is None:
                    self._allocateCache()
                self._cache[key2] = value[roiToSlice(start2 - start, stop2 - start)]
                self._blockState[blockKey] = self._dirtyState
                self._blockQuery[blockKey] = None
                self._lock.release()

            #pass request on
            #if not self._fixed:
            #    self.outputs["Output"][key] = value
        if slot == self.inputs["fixAtCurrent"]:
            self._fixed = value
            assert 1 == 2
Example #8
    def _serialize(self, group, name, slot):
        logger.debug("Serializing BlockSlot: {}".format( self.name ))
        mygroup = group.create_group(name)
        num = len(self.blockslot)
        for index in range(num):
            subname = self.subname.format(index)
            subgroup = mygroup.create_group(subname)
            nonZeroBlocks = self.blockslot[index].value
            for blockIndex, slicing in enumerate(nonZeroBlocks):
                block = self.slot[index][slicing].wait()
                blockName = 'block{:04d}'.format(blockIndex)

                if self._shrink_to_bb:
                    nonzero_coords = numpy.nonzero(block)
                    if len(nonzero_coords[0]) > 0:
                        block_start = sliceToRoi( slicing, (0,)*len(slicing) )[0]
                        block_bounding_box_start = numpy.array( list(map( numpy.min, nonzero_coords )) )
                        block_bounding_box_stop = 1 + numpy.array( list(map( numpy.max, nonzero_coords )) )
                        block_slicing = roiToSlice( block_bounding_box_start, block_bounding_box_stop )
                        bounding_box_roi = numpy.array([block_bounding_box_start, block_bounding_box_stop])
                        bounding_box_roi += block_start
                        
                        # Overwrite the vars that are written to the file
                        slicing = roiToSlice(*bounding_box_roi)
                        block = block[block_slicing]

                subgroup.create_dataset(blockName, data=block)
                subgroup[blockName].attrs['blockSlice'] = slicingToString(slicing)
Example #9
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start,roi.stop)

        #make shape of the input known
        shape = self.inputs["Input"].meta.shape
        #get N-D coordinate out of slice
        rstart, rstop = sliceToRoi(key, shape)

        #shift the reading scope
        rstart -=  self.shift
        rstop  -=  self.shift

        #calculate writing scope
        wstart = - numpy.minimum(rstart,rstart-rstart)
        wstop  = result.shape + numpy.minimum(numpy.array(shape)-rstop, rstop-rstop)

        #shifted rstart/rstop has to be in the original range (not out of range)
        #for shifts in both directions
        rstart = numpy.minimum(rstart,numpy.array(shape))
        rstart = numpy.maximum(rstart, rstart - rstart)
        rstop  = numpy.minimum(rstop,numpy.array(shape))
        rstop = numpy.maximum(rstop, rstop-rstop)

        #create slice out of the reading start and stop coordinates
        rkey = roiToSlice(rstart,rstop)

        #create slice out of the writing start and stop coordinates
        wkey = roiToSlice(wstart,wstop)

        #prefill result array with 0's
        result[:] = 0
        #write the shifted scope to the output
        req = self.inputs["Input"][rkey].writeInto(result[wkey])
        res = req.wait()
        return res
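
For intuition (hypothetical 1-D numbers): with self.shift = (3,) and an input shape of (10,), a request for the full range [0, 10) gives a reading roi of [-3, 7), which is clipped to [0, 7), and a writing roi of [3, 10); after the zero prefill, result[3:10] receives input[0:7], i.e. the data shifted forward by three pixels.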
Example #10
    def _setInSlotInput(self, slot, subindex, roi, value):
        assert len(roi.stop) == len(self.Input.meta.shape), "roi: {} has the wrong number of dimensions for Input shape: {}".format( roi, self.Input.meta.shape )
        assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), "roi: {} is out-of-bounds for Input shape: {}".format( roi, self.Input.meta.shape )
        
        block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
        block_starts = list(map( tuple, block_starts ))

        # Copy data to each block
        logger.debug( "Copying data INTO {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within source array and slicing within this block
            source_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            
            # Copy from source to block
            dataset = self._getBlockDataset( entire_block_roi )
            dataset[ roiToSlice( *block_relative_intersection ) ] = value[ roiToSlice(*source_relative_intersection) ]

            # Here, we assume that whoever uses this function to update ANY PART of a
            #  block is responsible for updating the ENTIRE block.
            # Therefore, this block is no longer 'dirty'
            self._dirtyBlocks.discard( block_start )
Example #11
    def execute(self, slot, subindex, roi, result):
        block_roi_start = roi.start // DVID_BLOCK_WIDTH
        block_roi_stop = ( roi.stop + DVID_BLOCK_WIDTH-1 ) // DVID_BLOCK_WIDTH
        block_slicing = roiToSlice(block_roi_start, block_roi_stop)

        if (numpy.array( (roi.start, roi.stop) ) % DVID_BLOCK_WIDTH).any():
            # Create an array that is bigger than the result, but block-aligned.
            aligned_result_shape = (block_roi_stop - block_roi_start) * DVID_BLOCK_WIDTH
            aligned_result = numpy.ndarray( aligned_result_shape, numpy.uint8 )
        else:
            aligned_result = result

        aligned_result_view = blockwise_view(aligned_result, 3*(DVID_BLOCK_WIDTH,), require_aligned_blocks=False)

        if self._transpose_axes:
            dset_slicing = tuple(reversed(block_slicing))
            # broadcast 3d data into 6d view
            aligned_result_view[:] = self._dset[dset_slicing].transpose()[..., None, None, None]
        else:
            # broadcast 3d data into 6d view
            aligned_result_view[:] = self._dset[block_slicing][..., None, None, None]
        
        # If the result wasn't aligned, we couldn't broadcast directly to it.
        # Copy the data now.
        if aligned_result is not result:
            start = roi.start - (block_roi_start*DVID_BLOCK_WIDTH)
            stop = start + (roi.stop - roi.start)
            result[:] = aligned_result[roiToSlice(start, stop)]
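
A quick check of the alignment arithmetic (assuming DVID_BLOCK_WIDTH is 32): a request covering [40, 100) along one axis spans blocks [1, 4), i.e. the block-aligned range [32, 128), and the requested data is then copied out of the aligned buffer at [40-32, 100-32) = [8, 68).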
Example #12
    def _executeOutput(self, roi, destination):
        assert len(roi.stop) == len(self.Input.meta.shape), "roi: {} has the wrong number of dimensions for Input shape: {}".format( roi, self.Input.meta.shape )
        assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), "roi: {} is out-of-bounds for Input shape: {}".format( roi, self.Input.meta.shape )
        
        block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
        block_starts = list(map( tuple, block_starts ))

        # Ensure all block cache files are up-to-date
        reqPool = RequestPool() # (Do the work in parallel.)
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )
            f = partial( self._ensureCached, entire_block_roi)
            reqPool.add( Request(f) )
        logger.debug( "Waiting for {} blocks...".format( len(block_starts) ) )
        reqPool.wait()

        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug( "Copying data from {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            
            # Copy from block to destination
            dataset = self._getBlockDataset( entire_block_roi )
            destination[ roiToSlice(*destination_relative_intersection) ] = dataset[ roiToSlice( *block_relative_intersection ) ]
        return destination
Example #13
    def _copyData(self, roi, destination, block_starts):
        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug( "Copying data from {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            destination_relative_intersection_slicing = roiToSlice(*destination_relative_intersection)
            block_relative_intersection_slicing = roiToSlice( *block_relative_intersection )
            
            # Copy from block to destination
            dataset = self._getBlockDataset( entire_block_roi )
            if self.Output.meta.has_mask:
                destination.data[ destination_relative_intersection_slicing ] = dataset["data"][ block_relative_intersection_slicing ]
                destination.mask[ destination_relative_intersection_slicing ] = dataset["mask"][ block_relative_intersection_slicing ]
                destination.fill_value = dataset["fill_value"][()]
            else:
                destination[ destination_relative_intersection_slicing ] = dataset[ block_relative_intersection_slicing ]
            self._last_access_times[block_start] = time.time()
Example #14
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start, roi.stop)

        shape = self.inputs["Input"].meta.shape
        rstart, rstop = sliceToRoi(key, self.outputs["Output"].meta.shape)
        rstart.append(0)
        rstop.append(shape[-1])
        rkey = roiToSlice(rstart, rstop)
        img = self.inputs["Input"][rkey].allocate().wait()

        stop = img.size

        seg = []

        for i in range(0, stop, img.shape[-1]):
            curr_prob = -1
            highest_class = -1
            for c in range(img.shape[-1]):
                prob = img.ravel()[i + c]
                if prob > curr_prob:
                    curr_prob = prob
                    highest_class = c
            assert highest_class != -1, "OpSegmentation: Strange classes/probabilities"

            seg.append(highest_class)

        seg = numpy.array(seg)
        seg.resize(img.shape[:-1])

        return seg[:]
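
The per-pixel loop above is equivalent to an argmax over the channel (last) axis; a vectorized version of the same segmentation (compare the argmax-based operator further down this page) would be:

seg = numpy.argmax(img, axis=-1)      # highest-probability class per pixel
seg = seg.reshape(img.shape[:-1])     # same shape as the loop's result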
Example #15
    def _retrieve_local_tile(self, rest_args, tile_relative_intersection, data_out):
        tile_path = self.description.tile_url_format.format( **rest_args )
        logger.debug("Opening {}".format( tile_path ))

        if not os.path.exists(tile_path):
            logger.error("Tile does not exist: {}".format( tile_path ))
            data_out[...] = 0
            return

        # Read the image from the disk with vigra
        img = vigra.impex.readImage(tile_path, dtype='NATIVE')
        assert img.ndim == 3
        if self.description.is_rgb:
            # "Convert" to grayscale -- just take first channel.
            img = img[...,0:1]
        assert img.shape[-1] == 1, "Image has more channels than expected.  "\
                                   "If it is RGB, be sure to set the is_rgb flag in your description json."
        
        # img has axes xyc, but we want zyx
        img = img.transpose()[None,0,:,:]

        if self.description.invert_y_axis:
            # More special Raveler support:
            # Raveler's conventions for the Y-axis are the reverse for everyone else's.
            img = img[:, ::-1, :]

        # Copy just the part we need into the destination array
        assert img[roiToSlice(*tile_relative_intersection)].shape == data_out.shape
        data_out[:] = img[roiToSlice(*tile_relative_intersection)]

        # If there's a special transform, apply it now.
        if self.description.data_transform_function is not None:
            transform = eval(self.description.data_transform_function)
            data_out[:] = transform(data_out)
Example #16
    def getOutSlot(self, slot, key, result):
        start, stop = sliceToRoi(key, self.shape)
        
        diff = stop-start
        
        splitDim = numpy.argmax(diff[:-1])
        splitPos = start[splitDim] + diff[splitDim] // 2
        
        stop2 = stop.copy()
        stop2[splitDim] = splitPos
        start2 = start.copy()
        start2[splitDim] = splitPos
        
        
        destStart = start -start # zeros
        destStop = stop - start
        
        destStop2 = destStop.copy()
        destStop2[splitDim] = diff[splitDim] // 2
        destStart2 = destStart.copy()
        destStart2[splitDim] = diff[splitDim] // 2
        
        writeKey1 = roiToSlice(destStart,destStop2)        
        writeKey2 = roiToSlice(destStart2,destStop)        
        
        key1 = roiToSlice(start,stop2)
        key2 = roiToSlice(start2,stop)

        req1 = self.inputs["Input"][key1].writeInto(result[writeKey1])
        req2 = self.inputs["Input"][key2].writeInto(result[writeKey2])
        req1.wait()
        req2.wait()
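
Worked example (hypothetical request): for start = (0, 0, 0, 0) and stop = (4, 100, 6, 2), the largest non-channel extent is along axis 1, so splitPos becomes 50 and the two sub-requests cover [0, 50) and [50, 100) of that axis, each writing into its own half of the result.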
Example #17
    def execute(self, slot, subindex, rroi, result):
        key = roiToSlice(rroi.start, rroi.stop)
        index = subindex[0]
        # Index of the input slice this data will come from.
        sliceIndex = self.getSliceIndexes()[index]

        outshape = self.Slices[index].meta.shape
        start, stop = roi.sliceToRoi(key, outshape)

        start = list(start)
        stop = list(stop)

        flag = self.AxisFlag.value
        indexAxis = self.Input.meta.axistags.index(flag)

        start.pop(indexAxis)
        stop.pop(indexAxis)

        start.insert(indexAxis, sliceIndex)
        stop.insert(indexAxis, sliceIndex)

        newKey = roi.roiToSlice(numpy.array(start), numpy.array(stop))

        self.Input[newKey].writeInto(result).wait()
        return result
Example #18
    def execute(self, slot, subindex, rroi, result):
        key = roiToSlice(rroi.start, rroi.stop)
        index = subindex[0]
        # Index of the input slice this data will come from.
        sliceIndex = self.getSliceIndexes()[index]

        outshape = self.outputs["Slices"][index].meta.shape
        start, stop = roi.sliceToRoi(key, outshape)
        oldstart, oldstop = start, stop

        start = list(start)
        stop = list(stop)

        flag = self.inputs["AxisFlag"].value
        indexAxis = self.inputs["Input"].meta.axistags.index(flag)

        start.pop(indexAxis)
        stop.pop(indexAxis)

        start.insert(indexAxis, sliceIndex)
        stop.insert(indexAxis, sliceIndex)

        newKey = roi.roiToSlice(numpy.array(start), numpy.array(stop))

        ttt = self.inputs["Input"][newKey].allocate().wait()
        return ttt[:]
Example #19
    def _transferBlockDataHdf5(self, entire_block_roi, block_relative_roi, array_data, array_slicing, read, datasetPathComponents ):
        """
        Transfer a block of data to/from an hdf5 dataset.
        See _transferBlockData() for details.
        
        We use separate parameters for array_data and array_slicing to allow users to pass an hdf5 dataset for array_data.
        """
        # For the hdf5 format, the full path format INCLUDES the dataset name, e.g. /path/to/myfile.h5/volume/data
        path_parts = datasetPathComponents
        datasetDir = path_parts.externalDirectory
        hdf5FilePath = path_parts.externalPath
        if len(path_parts.internalPath) == 0:
            raise RuntimeError("Your hdf5 block filename format MUST specify an internal path, e.g. block{roiString}.h5/volume/blockdata")

        block_start = entire_block_roi[0]
        if read:
            # Check for problems before reading.
            if self.getBlockStatus( block_start ) is not BlockwiseFileset.BLOCK_AVAILABLE:
                raise BlockwiseFileset.BlockNotReadyError( block_start )

            hdf5File = self._getOpenHdf5Blockfile( hdf5FilePath )

            if self._description.dtype != object and isinstance(array_data, numpy.ndarray) and array_data.flags.c_contiguous:
                hdf5File[ path_parts.internalPath ].read_direct( array_data, roiToSlice( *block_relative_roi ), array_slicing )
            elif self._description.dtype == object:
                # We store arrays of dtype=object as arrays of pickle strings.
                array_pickled_data = hdf5File[ path_parts.internalPath ][ roiToSlice( *block_relative_roi ) ]
                array_data[ array_slicing ] = vectorized_pickle_loads(array_pickled_data)
            else:
                array_data[ array_slicing ] = hdf5File[ path_parts.internalPath ][ roiToSlice( *block_relative_roi ) ]
                
        else:
            # Create the directory
            if not os.path.exists( datasetDir ):
                os.makedirs( datasetDir )
                # For debug purposes, output a copy of the settings 
                #  that were active **when this block was created**
                descriptionFileName = os.path.split(self._descriptionFilePath)[1]
                debugDescriptionFileCopyPath = os.path.join(datasetDir, descriptionFileName)
                BlockwiseFileset.writeDescription(debugDescriptionFileCopyPath, self._description)

            # Clear the block status.
            # The CALLER is responsible for setting it again.
            self.setBlockStatus( block_start, BlockwiseFileset.BLOCK_NOT_AVAILABLE )

            # Write the block data file
            hdf5File = self._getOpenHdf5Blockfile( hdf5FilePath )
            if path_parts.internalPath not in hdf5File:
                self._createDatasetInFile( hdf5File, path_parts.internalPath, entire_block_roi )
            dataset = hdf5File[ path_parts.internalPath ]
            data = array_data[ array_slicing ]
            if data.dtype == object:
                # hdf5 can't handle datasets with dtype=object,
                #  so we have to pickle each item first.
                dataset[ roiToSlice( *block_relative_roi ) ] = vectorized_pickle_dumps(data)
            else:
                dataset[ roiToSlice( *block_relative_roi ) ] = data
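
The dtype=object branches above depend on element-wise pickling before the data touches hdf5. A minimal sketch of that idea (an assumption for illustration, not lazyflow's actual vectorized_pickle_dumps / vectorized_pickle_loads):

import pickle
import numpy

def pickle_elementwise(arr):
    # Pickle each element of an object array to bytes for storage.
    out = numpy.empty(arr.shape, dtype=object)
    for idx in numpy.ndindex(arr.shape):
        out[idx] = pickle.dumps(arr[idx], protocol=2)
    return out

def unpickle_elementwise(arr):
    # Reverse: unpickle each stored bytes object back into a Python object.
    out = numpy.empty(arr.shape, dtype=object)
    for idx in numpy.ndindex(arr.shape):
        out[idx] = pickle.loads(arr[idx])
    return out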
Example #20
        def execute(self, slot, roi, result):
            key = roi.toSlice()
            self.lock.acquire()
            assert(self.inputs["eraser"].connected() == True and self.inputs["shape"].connected() == True and self.inputs["blockShape"].connected()==True), \
            "OpDenseSparseArray:  One of the neccessary input slots is not connected: shape: %r, eraser: %r" % \
            (self.inputs["eraser"].connected(), self.inputs["shape"].connected())
            if slot.name == "Output":
                #result[:] = self._denseArray[key]
                #find the block key
                start, stop = sliceToRoi(key, self.shape)
                blockStart = (1.0 * start / self._blockShape).floor()
                blockStop = (1.0 * stop / self._blockShape).ceil()
                blockKey = roiToSlice(blockStart,blockStop)
                innerBlocks = self._blockNumbers[blockKey]
                if lazyflow.verboseRequests:
                    print("OpBlockedSparseLabelArray %r: request with key %r for %d inner Blocks" % (self, key, len(innerBlocks.ravel())))
                for b_ind in innerBlocks.ravel():
                    #which part of the original key does this block fill?
                    offset = self._blockShape*self._flatBlockIndices[b_ind]
                    bigstart = numpy.maximum(offset, start)
                    bigstop = numpy.minimum(offset + self._blockShape, stop)
                
                    smallstart = bigstart-offset
                    smallstop = bigstop - offset
                    
                    bigkey = roiToSlice(bigstart-start, bigstop-start)
                    smallkey = roiToSlice(smallstart, smallstop)
                    if not b_ind in self._labelers:
                        result[bigkey]=0
                    else:
                        result[bigkey]=self._labelers[b_ind]._denseArray[smallkey]
            
            elif slot.name == "nonzeroValues":
                nzvalues = set()
                for l in self._labelers.values():
                    nzvalues |= set(l._sparseNZ.values())
                result[0] = numpy.array(list(nzvalues))

            elif slot.name == "nonzeroCoordinates":
                print "not supported yet"
                #result[0] = numpy.array(self._sparseNZ.keys())
            elif slot.name == "nonzeroBlocks":
                #we only return all non-zero blocks, no keys
                slicelist = []
                for b_ind in self._labelers.keys():
                    offset = self._blockShape*self._flatBlockIndices[b_ind]
                    bigstart = offset
                    bigstop = numpy.minimum(offset + self._blockShape, self.shape)                    
                    bigkey = roiToSlice(bigstart, bigstop)
                    slicelist.append(bigkey)
                
                result[0] = slicelist
                
                
            self.lock.release()
            return result
    def execute(self, slot, subindex, roi, result):
        key = roi.toSlice()
        self.lock.acquire()
        assert(self.inputs["eraser"].ready() == True and self.inputs["shape"].ready() == True and self.inputs["blockShape"].ready()==True), \
        "OpBlockedSparseLabelArray:  One of the neccessary input slots is not ready: shape: %r, eraser: %r" % \
        (self.inputs["eraser"].ready(), self.inputs["shape"].ready())
        if slot.name == "Output":
                #result[:] = self._denseArray[key]
                #find the block key
            start, stop = sliceToRoi(key, self._cacheShape)
            blockStart = (1.0 * start / self._blockShape).floor()
            blockStop = (1.0 * stop / self._blockShape).ceil()
            blockKey = roiToSlice(blockStart,blockStop)
            innerBlocks = self._blockNumbers[blockKey]
            for b_ind in innerBlocks.ravel():
                #which part of the original key does this block fill?
                offset = self._blockShape*self._flatBlockIndices[b_ind]
                bigstart = numpy.maximum(offset, start)
                bigstop = numpy.minimum(offset + self._blockShape, stop)

                smallstart = bigstart-offset
                smallstop = bigstop - offset

                bigkey = roiToSlice(bigstart-start, bigstop-start)
                smallkey = roiToSlice(smallstart, smallstop)
                if not b_ind in self._labelers or not self._labelers[b_ind].Output.ready():
                    result[bigkey]=0
                else:
                    try:
                        labeler = self._labelers[b_ind]
                        denseArray = labeler._denseArray[smallkey]
                        result[bigkey]= denseArray
                    except:
                        logger.error( "Exception in OpBlockedSparseLabelArray.execute, probably due to simultaneous calls to setInSlot() and execute()" )
                        logger.error( "labeler = {}".format( labeler ) )
                        logger.error( "denseArray = {}".format( denseArray ) )
                        logger.error( "result = {}".format( result ) )
                        raise

        elif slot.name == "nonzeroValues":
            nzvalues = set()
            for l in self._labelers.values():
                nzvalues |= set(l._sparseNZ.values())
            result[0] = numpy.array(list(nzvalues))

        elif slot.name == "nonzeroCoordinates":
            assert False, "not supported yet"
            #result[0] = numpy.array(self._sparseNZ.keys())
        elif slot.name == "nonzeroBlocks":
            #we only return all non-zero blocks, no keys
            result[0] = self._get_nonzero_blocks()
        elif slot.name == "maxLabel":
            result[0] = self._maxLabel

        self.lock.release()
        return result
Example #22
    def _serialize(self, group, name, slot):
        logger.debug("Serializing BlockSlot: {}".format( self.name ))
        mygroup = group.create_group(name)
        num = len(self.blockslot)
        for index in range(num):
            subname = self.subname.format(index)
            subgroup = mygroup.create_group(subname)
            nonZeroBlocks = self.blockslot[index].value
            for blockIndex, slicing in enumerate(nonZeroBlocks):
                if not isinstance(slicing[0], slice):
                    slicing = roiToSlice(*slicing)

                block = self.slot[index][slicing].wait()
                blockName = 'block{:04d}'.format(blockIndex)

                if self._shrink_to_bb:
                    nonzero_coords = numpy.nonzero(block)
                    if len(nonzero_coords[0]) > 0:
                        block_start = sliceToRoi( slicing, (0,)*len(slicing) )[0]
                        block_bounding_box_start = numpy.array( list(map( numpy.min, nonzero_coords )) )
                        block_bounding_box_stop = 1 + numpy.array( list(map( numpy.max, nonzero_coords )) )
                        block_slicing = roiToSlice( block_bounding_box_start, block_bounding_box_stop )
                        bounding_box_roi = numpy.array([block_bounding_box_start, block_bounding_box_stop])
                        bounding_box_roi += block_start
                        
                        # Overwrite the vars that are written to the file
                        slicing = roiToSlice(*bounding_box_roi)
                        block = block[block_slicing]

                # If we have a masked array, convert it to a structured array so that h5py can handle it.
                if slot[index].meta.has_mask:
                    mygroup.attrs["meta.has_mask"] = True

                    block_group = subgroup.create_group(blockName)

                    if self.compression_level:
                        block_group.create_dataset("data",
                                                   data=block.data,
                                                   compression='gzip',
                                                   compression_opts=self.compression_level)
                    else:
                        block_group.create_dataset("data", data=block.data)
                        
                    block_group.create_dataset(
                        "mask",
                        data=block.mask,
                        compression="gzip",
                        compression_opts=2
                    )
                    block_group.create_dataset("fill_value", data=block.fill_value)

                    block_group.attrs['blockSlice'] = slicingToString(slicing)
                else:
                    subgroup.create_dataset(blockName, data=block)
                    subgroup[blockName].attrs['blockSlice'] = slicingToString(slicing)
def normalize_synapse_ids(current_slice, current_roi, previous_slice, previous_roi, maxLabel):
    current_roi = numpy.array(current_roi)
    intersection_roi = None
    if previous_roi is not None:
        previous_roi = numpy.array(previous_roi)
        current_roi_2d = current_roi[:, :-1]
        previous_roi_2d = previous_roi[:, :-1]
        intersection_roi = getIntersection( current_roi_2d, previous_roi_2d, assertIntersect=False )

    if intersection_roi is None or previous_slice is None or abs(int(current_roi[0,2]) - int(previous_roi[0,2])) > 1:
        # We want our synapse ids to be consecutive, so we do a proper relabeling.
        # If we could guarantee that the input slice was already consecutive, we could do this:
        # relabeled_current = numpy.where( current_slice, current_slice+maxLabel, 0 )
        # ... but that's not the case.

        current_unique_labels = numpy.unique(current_slice)
        assert current_unique_labels[0] == 0, "This function assumes that not all pixels belong to detections."
        if len(current_unique_labels) == 1:
            # No objects in this slice.
            return current_slice, maxLabel
        max_current_label = current_unique_labels[-1]
        relabel = numpy.zeros( (max_current_label+1,), dtype=numpy.uint32 )
        new_max = maxLabel + len(current_unique_labels)-1
        relabel[(current_unique_labels[1:],)] = numpy.arange( maxLabel+1, new_max+1, dtype=numpy.uint32 )
        return relabel[current_slice], new_max
    
    # Extract the intersecting region from the current/prev slices,
    #  so it's easy to compare corresponding pixels
    current_intersection_roi = numpy.subtract(intersection_roi, current_roi_2d[0])
    prev_intersection_roi = numpy.subtract(intersection_roi, previous_roi_2d[0])    
    current_intersection_slice = current_slice[roiToSlice(*current_intersection_roi)]
    prev_intersection_slice = previous_slice[roiToSlice(*prev_intersection_roi)]

    # omit label 0
    previous_slice_objects = numpy.unique(previous_slice)[1:]
    current_slice_objects = numpy.unique(current_slice)[1:]
    max_current_object = max(0, *current_slice_objects)
    relabel = numpy.zeros((max_current_object+1,), dtype=numpy.uint32)
    
    for cc in previous_slice_objects:
        current_labels = numpy.unique(current_intersection_slice[prev_intersection_slice==cc].flat)
        for cur_label in current_labels:
            relabel[cur_label] = cc
    
    for cur_object in current_slice_objects:
        if relabel[cur_object]==0:
            relabel[cur_object] = maxLabel+1
            maxLabel=maxLabel+1

    relabel[0] = 0

    # Relabel the entire current slice
    relabeled_slice_objects = relabel[current_slice]
    return relabeled_slice_objects, maxLabel
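
The relabeling above hinges on building a lookup table indexed by old label and applying it in one fancy-indexing step. A tiny synthetic illustration of that trick:

import numpy

old = numpy.array([[0, 3, 3],
                   [0, 0, 7]], dtype=numpy.uint32)
relabel = numpy.zeros(old.max() + 1, dtype=numpy.uint32)
relabel[3] = 1        # map old label 3 -> new label 1
relabel[7] = 2        # map old label 7 -> new label 2
new = relabel[old]    # vectorized relabeling of the whole slice
# new == [[0, 1, 1],
#         [0, 0, 2]]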
Example #24
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start,roi.stop)
        shape = self.inputs["Input"].meta.shape

        rstart, rstop = sliceToRoi(key, self.outputs["Output"].meta.shape)
        rstart[-1] = 0
        rstop[-1] = shape[-1]
        rkey = roiToSlice(rstart, rstop)
        img = self.inputs["Input"][rkey].wait()
        axis = img.ndim - 1
        result = numpy.argmax(img, axis=axis)
        result.resize(result.shape + (1,))
        return result
Example #25
    def testCompressed(self):
        graph = Graph()
        opDataProvider = OpArrayPiperWithAccessCount( graph=graph )
        opCache = OpUnblockedArrayCache( graph=graph )
        opCache.CompressionEnabled.setValue(True)
        
        data = np.random.random( (100,100,100) ).astype(np.float32)
        opDataProvider.Input.setValue( vigra.taggedView( data, 'zyx' ) )
        opCache.Input.connect( opDataProvider.Output )
        
        roi = ((30, 30, 30), (50, 50, 50))
        cache_data = opCache.Output( *roi ).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 1

        # Request the same data a second time.
        # Access count should not change.
        cache_data = opCache.Output( *roi ).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 1
        
        # Now invalidate a part of the data
        # The cache will discard it, so the access count should increase.
        opDataProvider.Input.setDirty( (30, 30, 30), (31, 31, 31) )
        cache_data = opCache.Output( *roi ).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 2
                
        # Repeat this next part just for safety
        for _ in range(10):
            # Make sure the cache is empty
            opDataProvider.Input.setDirty( (30, 30, 30), (31, 31, 31) )
            opDataProvider.accessCount = 0

            # Create many requests for the same data.
            # Upstream data should only be accessed ONCE.
            pool = RequestPool()
            for _ in range(10):
                pool.add( opCache.Output( *roi ) )
            pool.wait()
            assert opDataProvider.accessCount == 1

        # Also, make sure requests for INNER rois of stored blocks are also serviced from memory
        opDataProvider.accessCount = 0
        inner_roi = ((35, 35, 35), (45, 45, 45))
        cache_data = opCache.Output( *inner_roi ).wait()
        assert (cache_data == data[roiToSlice(*inner_roi)]).all()
        assert opDataProvider.accessCount == 0
    def _executeBlockwiseRegionFeatures(self, roi, destination):
        """
        Provide data for the BlockwiseRegionFeatures slot.
        Note: Each block produces a single element of this slot's output.  Construct requested roi coordinates accordingly.
              e.g. if block_shape is (1,10,10,10,1), the features for the block starting at 
                   (1,20,30,40,5) should be requested via roi [(1,2,3,4,5),(2,3,4,5,6)]
        
        Note: It is assumed that you will request these features for debug purposes, AFTER requesting the prediction image.
              Therefore, it is considered an error to request features that are not already computed.
        """
        axiskeys = self.RawImage.meta.getAxisKeys()
        # Find the corresponding block start coordinates
        block_shape = self._getFullShape( self.BlockShape3dDict.value )
        pixel_roi = numpy.array(block_shape) * (roi.start, roi.stop)
        block_starts = getIntersectingBlocks( block_shape, pixel_roi )
        block_starts = map( tuple, block_starts )
        
        for block_start in block_starts:
            assert block_start in self._blockPipelines, "Not allowed to request region features for blocks that haven't yet been processed." # See note above

            # Discard spatial axes to get (t,c) index for region slot roi
            tagged_block_start = list(zip( axiskeys, block_start ))
            tagged_block_start_tc = [(k, v) for (k, v) in tagged_block_start if k in 'tc']
            block_start_tc = [v for (k, v) in tagged_block_start_tc]
            block_roi_tc = ( block_start_tc, block_start_tc + numpy.array([1,1]) )

            destination_start = numpy.array(block_start) // block_shape - roi.start
            destination_stop = destination_start + numpy.array( [1]*len(axiskeys) )

            opBlockPipeline = self._blockPipelines[block_start]
            req = opBlockPipeline.BlockwiseRegionFeatures( *block_roi_tc )
            req.writeInto( destination[ roiToSlice( destination_start, destination_stop ) ] )
            req.wait()
        
        return destination
    def _executePredictionImage(self, roi, destination):
        # Determine intersecting blocks
        block_shape = self._getFullShape( self.BlockShape3dDict.value )
        block_starts = getIntersectingBlocks( block_shape, (roi.start, roi.stop) )
        block_starts = list(map( tuple, block_starts ))

        # Ensure that block pipelines exist (create first if necessary)
        for block_start in block_starts:
            self._ensurePipelineExists(block_start)

        # Retrieve result from each block, and write it into the appropriate region of the destination
        # TODO: Parallelize this loop
        for block_start in block_starts:
            opBlockPipeline = self._blockPipelines[block_start]
            block_roi = opBlockPipeline.block_roi
            block_intersection = getIntersection( block_roi, (roi.start, roi.stop) )
            block_relative_intersection = numpy.subtract(block_intersection, block_roi[0])
            destination_relative_intersection = numpy.subtract(block_intersection, roi.start)
            
            destination_slice = roiToSlice( *destination_relative_intersection )
            req = opBlockPipeline.PredictionImage( *block_relative_intersection )
            req.writeInto( destination[destination_slice] )
            req.wait()

        return destination
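
Following up on the docstring of _executeBlockwiseRegionFeatures above: the block-start to roi conversion is just an integer division by the block shape. For the numbers used in that docstring:

import numpy

block_shape = numpy.array([1, 10, 10, 10, 1])
block_start = numpy.array([1, 20, 30, 40, 5])
roi_start = block_start // block_shape   # -> [1, 2, 3, 4, 5]
roi_stop = roi_start + 1                 # -> [2, 3, 4, 5, 6]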
Example #28
    def propagateDirty(self,slot,subindex,roi):
   
        if slot == self.Input:
            channelAxis = self.Input.meta.axistags.index('c')
            numChannels = self.Input.meta.shape[channelAxis]
            dirtyChannels = roi.stop[channelAxis] - roi.start[channelAxis]
            
            # If all the input channels were dirty, the dirty output region is a contiguous block
            if dirtyChannels == numChannels:
                dirtyKey = list(roiToSlice(roi.start, roi.stop))
                dirtyKey[channelAxis] = slice(None)
                dirtyRoi = sliceToRoi(dirtyKey, self.Output.meta.shape)
                self.Output.setDirty(dirtyRoi[0], dirtyRoi[1])
            else:
                # Only some input channels were dirty,
                # so we must mark each dirty output region separately.
                numFeatures = self.Output.meta.shape[channelAxis] // numChannels
                for featureIndex in range(numFeatures):
                    startChannel = numChannels*featureIndex + roi.start[channelAxis]
                    stopChannel = startChannel + roi.stop[channelAxis]
                    dirtyRoi = copy.copy(roi)
                    dirtyRoi.start[channelAxis] = startChannel
                    dirtyRoi.stop[channelAxis] = stopChannel
                    self.Output.setDirty(dirtyRoi)

        elif (slot == self.Matrix
              or slot == self.Scales
              or slot == self.FeatureIds):
            self.Output.setDirty(slice(None))
        else:
            assert False, "Unknown dirty input slot."
Example #29
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start,roi.stop)

        matrix = self.inputs["Input"][key].allocate().wait()
        matrix = self.function(matrix)

        return matrix[:]
Example #30
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start,roi.stop)

        start = self.inputs["Start"].value
        stop = self.inputs["Stop"].value

        temp = tuple()
        for i in range(len(start)):
            if stop[i] - start[i] > 0:
                temp += (stop[i]-start[i],)

        readStart, readStop = sliceToRoi(key, temp)

        newKey = ()
        resultKey = ()
        i = 0
        i2 = 0
        for kkk in range(len(start)):
            e = stop[kkk] - start[kkk]
            if e > 0:
                newKey += (slice(start[i2] + readStart[i], start[i2] + readStop[i],None),)
                resultKey += (slice(0,temp[i2],None),)
                i +=1
            else:
                newKey += (slice(start[i2], start[i2], None),)
                resultKey += (0,)
            i2 += 1

        res = self.inputs["Input"][newKey].allocate().wait()
        result[:] = res[resultKey]
Example #31
    def _setInSlotInputHdf5(self, slot, subindex, roi, value):
        logger.debug("Setting block {} from hdf5".format( roi ))
        if self.Output.meta.has_mask:
            assert isinstance( value, h5py.Group ), "InputHdf5 slot requires an hdf5 Group to copy from (not a numpy masked array)."
        else:
            assert isinstance( value, h5py.Dataset ), "InputHdf5 slot requires an hdf5 Dataset to copy from (not a numpy array)."

        block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, roi.start )

        roi_is_exactly_one_block = True
        roi_is_exactly_one_block &= ((roi.start % self._blockshape) == 0).all()
        roi_is_exactly_one_block &= (block_roi == numpy.array((roi.start, roi.stop))).all()
        if roi_is_exactly_one_block:
            cachefile = self._getCacheFile( block_roi )
            logger.debug( "Copying HDF5 data directly into block {}".format( block_roi ) )

            if self.Output.meta.has_mask:
                assert len(value) == 3

                for each in ["data", "mask", "fill_value"]:
                    assert each in value
                    assert cachefile[each].dtype == value[each].dtype
                    assert cachefile[each].shape == value[each].shape

                for each in ["data", "mask", "fill_value"]:
                    del cachefile[each]
                    cachefile.copy( value[each], each )
            else:
                assert cachefile['data'].dtype == value.dtype
                assert cachefile['data'].shape == value.shape
                del cachefile['data']
                cachefile.copy( value, 'data' )

            block_start = tuple(roi.start)
            self._dirtyBlocks.discard( block_start )
        else:
            # This hdf5 data does not correspond to exactly one block.
            # We must uncompress it and write it the "normal" way (the slow way)
            # FIXME: This would use less memory if we uncompressed the data block-by-block
            data = None

            if self.Output.meta.has_mask:
                data = numpy.ma.masked_array(
                    value["data"][()],
                    mask=value["mask"][()],
                    fill_value=value["fill_value"][()],
                    shrink=False
                )
            else:
                data = value[()]

            self.Input[roiToSlice(roi.start, roi.stop)] = data
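
For intuition (hypothetical sizes): with a blockshape of (10, 10) and an Output shape of (25, 25), a roi of [(10, 10), (20, 20)] passes both checks and is copied straight into the block's cache file, whereas [(10, 10), (15, 20)] starts on a block boundary but does not span the whole block, so it falls back to the slower uncompress-and-write path.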
Example #32
    def setInSlot(self, slot, subindex, roi, value):
        assert slot == self.inputs["Input"]
        ch = self._cacheHits
        ch += 1
        self._cacheHits = ch
        start, stop = roi.start, roi.stop
        blockStart = numpy.ceil(1.0 * start / self._blockShape)
        blockStop = numpy.floor(1.0 * stop / self._blockShape)
        blockStop = numpy.where(stop == self.Output.meta.shape, self._dirtyShape, blockStop)
        blockKey = roiToSlice(blockStart,blockStop)

        if (self._blockState[blockKey] != OpArrayCache.CLEAN).any():
            start2 = blockStart * self._blockShape
            stop2 = blockStop * self._blockShape
            stop2 = numpy.minimum(stop2, self.Output.meta.shape)
            key2 = roiToSlice(start2,stop2)
            with self._lock:
                if self._cache is None:
                    self._allocateCache()
                self._cache[key2] = value[roiToSlice(start2-start,stop2-start)]
                self._blockState[blockKey] = self._dirtyState
                self._blockQuery[blockKey] = None
Example #33
        def request( self, slicing ):
            if cfg.getboolean('pixelpipeline', 'verbose'):
                volumina.printLock.acquire()
                print "  LazyflowSource '%s' requests %s" % (self.objectName(), volumina.strSlicing(slicing))
                volumina.printLock.release()
            if not is_pure_slicing(slicing):
                raise Exception('LazyflowSource: slicing is not pure')
            assert self._op5 is not None, "Underlying operator is None.  Are you requesting from a datasource that has been cleaned up already?"

            start, stop = sliceToRoi(slicing, self._op5.Output.meta.shape)
            clipped_roi = np.maximum(start, (0,0,0,0,0)), np.minimum(stop, self._op5.Output.meta.shape)
            clipped_slicing = roiToSlice(*clipped_roi)
            return LazyflowRequest( self._op5, clipped_slicing, self._priority, objectName=self.objectName() )
Example #34
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start,roi.stop)
        requests = []
        for input_slot in self.inputs["Inputs"]:
            requests.append(input_slot[key])

        data = []
        for req in requests:
            data.append(req.wait())

        fun = self.inputs["MergingFunction"].value

        return fun(data)
Example #35
    def execute(self, slot, subindex, roi, result):
        index = self.inputs["Index"].value
        channelIndex = self.Input.meta.axistags.channelIndex
        assert self.inputs["Input"].meta.shape[channelIndex] > index, \
            "Requested channel, {}, is out of Range (input shape is {})".format( index, self.Input.meta.shape )

        # Only ask for the channel we need
        key = roiToSlice(roi.start, roi.stop)
        newKey = list(key)
        newKey[channelIndex] = slice(index, index + 1, None)
        #newKey = key[:-1] + (slice(index,index+1),)
        self.inputs["Input"][tuple(newKey)].writeInto(result).wait()
        return result
Example #36
    def testBasic(self):
        graph = Graph()
        opDataProvider = OpArrayPiperWithAccessCount(graph=graph)
        opCache = OpUnblockedArrayCache(graph=graph)

        data = np.random.random((100, 100, 100)).astype(np.float32)
        opDataProvider.Input.setValue(vigra.taggedView(data, 'zyx'))
        opCache.Input.connect(opDataProvider.Output)

        assert opCache.CleanBlocks.value == []

        roi = ((30, 30, 30), (50, 50, 50))
        cache_data = opCache.Output(*roi).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 1
        assert opCache.CleanBlocks.value == [roiToSlice(*roi)]

        # Request the same data a second time.
        # Access count should not change.
        cache_data = opCache.Output(*roi).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 1
        assert opCache.CleanBlocks.value == [roiToSlice(*roi)]

        # Now invalidate a part of the data
        # The cache will discard it, so the access count should increase.
        opDataProvider.Input.setDirty((30, 30, 30), (31, 31, 31))
        assert opCache.CleanBlocks.value == []
        cache_data = opCache.Output(*roi).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 2

        # Repeat this next part just for safety
        for _ in range(10):
            # Make sure the cache is empty
            opDataProvider.Input.setDirty((30, 30, 30), (31, 31, 31))
            opDataProvider.accessCount = 0

            # Create many requests for the same data.
            # Upstream data should only be accessed ONCE.
            pool = RequestPool()
            for _ in range(10):
                pool.add(opCache.Output(*roi))
            pool.wait()
            assert opDataProvider.accessCount == 1

        # Also, make sure requests for INNER rois of stored blocks are also serviced from memory
        opDataProvider.accessCount = 0
        inner_roi = ((35, 35, 35), (45, 45, 45))
        cache_data = opCache.Output(*inner_roi).wait()
        assert (cache_data == data[roiToSlice(*inner_roi)]).all()
        assert opDataProvider.accessCount == 0
        assert opCache.CleanBlocks.value == [roiToSlice(*roi)]
Example #37
    def _copyData(self, roi, destination, block_starts):
        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug("Copying data from {} blocks...".format(
            len(block_starts)))
        for block_start in block_starts:
            entire_block_roi = getBlockBounds(self.Output.meta.shape,
                                              self._blockshape, block_start)

            # This block's portion of the roi
            intersecting_roi = getIntersection((roi.start, roi.stop),
                                               entire_block_roi)

            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(
                intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(
                intersecting_roi, block_start)
            destination_relative_intersection_slicing = roiToSlice(
                *destination_relative_intersection)
            block_relative_intersection_slicing = roiToSlice(
                *block_relative_intersection)

            # Copy from block to destination
            dataset = self._getBlockDataset(entire_block_roi)
            if self.Output.meta.has_mask:
                destination.data[
                    destination_relative_intersection_slicing] = dataset[
                        "data"][block_relative_intersection_slicing]
                destination.mask[
                    destination_relative_intersection_slicing] = dataset[
                        "mask"][block_relative_intersection_slicing]
                destination.fill_value = dataset["fill_value"][()]
            else:
                destination[
                    destination_relative_intersection_slicing] = dataset[
                        block_relative_intersection_slicing]
            self._last_access_times[block_start] = time.time()
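
Each block contributes only the part of the request that overlaps it, expressed once relative to the destination array and once relative to the block. A NumPy-only sketch of that intersection arithmetic (roi_to_slice is an illustrative helper):

import numpy as np

def roi_to_slice(start, stop):
    return tuple(slice(a, b) for a, b in zip(start, stop))

request_roi = (np.array((10, 10)), np.array((30, 30)))
block_roi = (np.array((20, 0)), np.array((40, 20)))        # a 20x20 block

# Overlap of the two rois
inter_start = np.maximum(request_roi[0], block_roi[0])
inter_stop = np.minimum(request_roi[1], block_roi[1])

destination = np.zeros(tuple(request_roi[1] - request_roi[0]))
block_data = np.ones(tuple(block_roi[1] - block_roi[0]))

dest_slicing = roi_to_slice(inter_start - request_roi[0], inter_stop - request_roi[0])
block_slicing = roi_to_slice(inter_start - block_roi[0], inter_stop - block_roi[0])
destination[dest_slicing] = block_data[block_slicing]
assert destination[10:20, 0:10].all() and destination.sum() == 100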
Example #38
    def _retrieve_local_tile(self, rest_args, tile_relative_intersection,
                             data_out):
        tile_path = self.description.tile_url_format.format(**rest_args)
        logger.debug("Opening {}".format(tile_path))

        if not os.path.exists(tile_path):
            logger.error("Tile does not exist: {}".format(tile_path))
            data_out[...] = 0
            return

        # Read the image from the disk with vigra
        img = vigra.impex.readImage(tile_path, dtype="NATIVE")
        assert img.ndim == 3
        if self.description.is_rgb:
            # "Convert" to grayscale -- just take first channel.
            img = img[..., 0:1]
        assert img.shape[-1] == 1, (
            "Image has more channels than expected.  "
            "If it is RGB, be sure to set the is_rgb flag in your description json."
        )

        # img has axes xyc, but we want zyx
        img = img.transpose()[None, 0, :, :]

        if self.description.invert_y_axis:
            # More special Raveler support:
            # Raveler's conventions for the Y-axis are the reverse for everyone else's.
            img = img[:, ::-1, :]

        # Copy just the part we need into the destination array
        assert img[roiToSlice(
            *tile_relative_intersection)].shape == data_out.shape
        data_out[:] = img[roiToSlice(*tile_relative_intersection)]

        # If there's a special transform, apply it now.
        if self.description.data_transform_function is not None:
            transform = eval(self.description.data_transform_function)
            data_out[:] = transform(data_out)
Example #39
    def _copyData(self, roi, destination, block_starts):
        """
        Copy data from each block into the destination array.
        For blocks that aren't currently stored, just write zeros.
        """
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        block_starts = list(map(tuple, block_starts))
        for block_start in block_starts:
            entire_block_roi = getBlockBounds(self.Output.meta.shape, self._blockshape, block_start)

            # This block's portion of the roi
            intersecting_roi = getIntersection((roi.start, roi.stop), entire_block_roi)

            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            destination_relative_intersection_slicing = roiToSlice(*destination_relative_intersection)
            block_relative_intersection_slicing = roiToSlice(*block_relative_intersection)

            if block_start in self._cacheFiles:
                # Copy from block to destination
                dataset = self._getBlockDataset(entire_block_roi)

                if self.Output.meta.has_mask:
                    destination[destination_relative_intersection_slicing] = dataset["data"][
                        block_relative_intersection_slicing
                    ]
                    destination.mask[destination_relative_intersection_slicing] = dataset["mask"][
                        block_relative_intersection_slicing
                    ]
                    destination.fill_value = dataset["fill_value"][()]
                else:
                    destination[destination_relative_intersection_slicing] = dataset[
                        block_relative_intersection_slicing
                    ]
            else:
                # Not stored yet.  Overwrite with zeros.
                destination[destination_relative_intersection_slicing] = 0
Example #40
    def _executeCurrentRavelerObjectRemainder(self, roi, result):        
        # Start with the original raveler object
        self._opSelectRavelerObject.Output(roi.start, roi.stop).writeInto(result).wait()

        lut = self._opFragmentSetLutCache.Output[:].wait()

        # Save memory: Implement (A - B) == (A & ~B), and do it with in-place operations
        slicing = roiToSlice( roi.start[1:4], roi.stop[1:4] )
        a = result[0,...,0]
        b = lut[self._mst.regionVol[slicing]] # (Advanced indexing)
        numpy.logical_not( b, out=b ) # ~B
        numpy.logical_and(a, b, out=a) # A & ~B
        
        return result
Example #41
    def request(self, slicing):
        if CONFIG.verbose_pixelpipeline:
            logger.info("%s '%s' requests %s'", type(self).__name__, self.objectName(), strSlicing(slicing))

        if not is_pure_slicing(slicing):
            raise Exception("LazyflowSource: slicing is not pure")
        assert (
            self._op5 is not None
        ), "Underlying operator is None.  Are you requesting from a datasource that has been cleaned up already?"

        start, stop = sliceToRoi(slicing, self._op5.Output.meta.shape)
        clipped_roi = np.maximum(start, (0, 0, 0, 0, 0)), np.minimum(stop, self._op5.Output.meta.shape)
        clipped_slicing = roiToSlice(*clipped_roi)
        return LazyflowRequest(self._op5, clipped_slicing, self._priority, objectName=self.objectName())
Example #42
    def propagateDirty(self, slot, subindex, roi):
        shape = self.Input.meta.shape
        key = roi.toSlice()

        if slot == self.inputs["Input"]:
            start, stop = sliceToRoi(key, shape)

            with self._lock:
                if self._blockState is not None:
                    blockStart = numpy.floor(1.0 * start / self._blockShape).astype(int)
                    blockStop = numpy.ceil(1.0 * stop / self._blockShape).astype(int)
                    blockKey = roiToSlice(blockStart,blockStop)
                    if self._fixed:
                        # Remember that this block became dirty while we were fixed 
                        #  so we can notify downstream operators when we become unfixed.
                        self._blockState[blockKey] = OpArrayCache.FIXED_DIRTY
                        self._has_fixed_dirty_blocks = True
                    else:
                        self._blockState[blockKey] = OpArrayCache.DIRTY

            if not self._fixed:
                self.outputs["Output"].setDirty(key)
        if slot == self.inputs["fixAtCurrent"]:
            if self.inputs["fixAtCurrent"].ready():
                self._fixed = self.inputs["fixAtCurrent"].value
                if not self._fixed and self.Output.meta.shape is not None and self._has_fixed_dirty_blocks:
                    # We've become unfixed, so we need to notify downstream 
                    #  operators of every block that became dirty while we were fixed.
                    # Convert all FIXED_DIRTY states into DIRTY states
                    with self._lock:
                        cond = (self._blockState[...] == OpArrayCache.FIXED_DIRTY)
                        self._blockState[...]  = fastWhere(cond, OpArrayCache.DIRTY, self._blockState, numpy.uint8)
                        self._has_fixed_dirty_blocks = False
                    newDirtyBlocks = numpy.transpose(numpy.nonzero(cond))
                    
                    # To avoid lots of setDirty notifications, we simply merge all the dirtyblocks into one single superblock.
                    # This should be the best option in most cases, but could be bad in some cases.
                    # TODO: Optimize this by merging the dirty blocks via connected components or something.
                    cacheShape = numpy.array(self.Output.meta.shape)
                    dirtyStart = cacheShape
                    dirtyStop = [0] * len(cacheShape)
                    for index in newDirtyBlocks:
                        blockStart = index * self._blockShape
                        blockStop = numpy.minimum(blockStart + self._blockShape, cacheShape)
                        
                        dirtyStart = numpy.minimum(dirtyStart, blockStart)
                        dirtyStop = numpy.maximum(dirtyStop, blockStop)

                    if len(newDirtyBlocks) > 0:
                        self.Output.setDirty( dirtyStart, dirtyStop )
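
When the cache becomes unfixed, every block that went dirty in the meantime is merged into a single bounding roi so that only one setDirty notification is sent. A NumPy-only sketch of that superblock computation:

import numpy as np

cache_shape = np.array((100, 100))
block_shape = np.array((20, 20))
dirty_block_indices = np.array([(1, 2), (3, 0)])   # block grid coordinates

dirty_start = cache_shape.copy()
dirty_stop = np.zeros(len(cache_shape), dtype=int)
for index in dirty_block_indices:
    block_start = index * block_shape
    block_stop = np.minimum(block_start + block_shape, cache_shape)
    dirty_start = np.minimum(dirty_start, block_start)
    dirty_stop = np.maximum(dirty_stop, block_stop)

assert (dirty_start == (20, 0)).all() and (dirty_stop == (80, 60)).all()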
Example #43
    def test(self):
        # Retrieve from server
        graph = Graph()
        opRoi = OpDvidRoi(TEST_DVID_SERVER,
                          self.uuid,
                          self.roi_name,
                          graph=graph)
        roi_vol = opRoi.Output((0, 0, 0), self.expected_data.shape).wait()
        assert (roi_vol == self.expected_data).all()

        # Test a non-aligned roi
        subvol = ((30, 60, 50), (40, 70, 70))
        roi_vol = opRoi.Output(*subvol).wait()
        assert (roi_vol == self.expected_data[roiToSlice(*subvol)]).all()
Example #44
    def execute(self, slot, subindex, roi, result):
        with self._lock:
            # Does this roi happen to fit ENTIRELY within an existing stored block?
            outer_rois = containing_rois(self._block_data.keys(),
                                         (roi.start, roi.stop))
            if len(outer_rois) > 0:
                # Use the first one we found
                block_roi = self._standardize_roi(*outer_rois[0])
                block_relative_roi = numpy.array(
                    (roi.start, roi.stop)) - block_roi[0]
                self.Output.stype.copy_data(
                    result, self._block_data[block_roi][roiToSlice(
                        *block_relative_roi)])
                return

        # Standardize roi for usage as dict key
        block_roi = self._standardize_roi(roi.start, roi.stop)

        # Get lock for this block (create first if necessary)
        with self._lock:
            if block_roi not in self._block_locks:
                self._block_locks[block_roi] = RequestLock()
            block_lock = self._block_locks[block_roi]

        # Handle identical simultaneous requests
        with block_lock:
            try:
                self.Output.stype.copy_data(result,
                                            self._block_data[block_roi])
                return
            except KeyError:  # Not yet stored: Request it now.

                # We attach a special attribute to the array to allow the upstream operator
                #  to optionally tell us not to bother caching the data.
                self.Input(roi.start, roi.stop).writeInto(result).block()

                if self.Input.meta.dontcache:
                    # The upstream operator says not to bother caching the data.
                    # (For example, see OpCacheFixer.)
                    return

                block = result.copy()
                with self._lock:
                    # Store the data.
                    # First double-check that the block wasn't removed from the
                    #   cache while we were requesting it.
                    # (Could have happened via propagateDirty() or eventually the arrayCacheMemoryMgr)
                    if block_roi in self._block_locks:
                        self._block_data[block_roi] = block
            self._last_access_times[block_roi] = time.time()
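
The cache above keys its stored blocks by roi, so the roi has to be converted into something hashable first. A small sketch of that idea; standardize_roi is an illustrative helper, not the operator's method:

import numpy as np

def standardize_roi(start, stop):
    # Nested tuples of plain ints are hashable and compare by value.
    return (tuple(int(x) for x in start), tuple(int(x) for x in stop))

block_data = {}
roi = (np.array((0, 0, 0)), np.array((10, 10, 10)))
block_data[standardize_roi(*roi)] = np.zeros((10, 10, 10))

# A later request with the same coordinates maps to the same key.
assert standardize_roi((0, 0, 0), (10, 10, 10)) in block_data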
Example #45
    def test_Roi_default_order(self):
        for i in range(self.tests):
            self.prepareVolnOp()
            shape = self.operator.Output.meta.shape
            roi = [None, None]
            roi[1] = [
                numpy.random.randint(2, s) if s != 1 else 1 for s in shape
            ]
            roi[0] = [
                numpy.random.randint(0, roi[1][i]) if s != 1 else 0
                for i, s in enumerate(shape)
            ]
            roi[0] = TinyVector(roi[0])
            roi[1] = TinyVector(roi[1])
            result = self.operator.Output(roi[0], roi[1]).wait()
            logger.debug(
                "------------------------------------------------------")
            logger.debug("self.array.shape = " + str(self.array.shape))
            logger.debug("type(input) == " +
                         str(type(self.operator.Input.value)))
            logger.debug("input.shape == " +
                         str(self.operator.Input.meta.shape))
            logger.debug("Input Tags:")
            logger.debug(str(self.operator.Input.meta.axistags))
            logger.debug("Output Tags:")
            logger.debug(str(self.operator.Output.meta.axistags))
            logger.debug("roi= " + str(roi))
            logger.debug("type(result) == " + str(type(result)))
            logger.debug("result.shape == " + str(result.shape))
            logger.debug(
                "------------------------------------------------------")

            # Check the shape
            assert len(result.shape) == 5
            assert not isinstance(
                result, vigra.VigraArray
            ), "For compatibility with generic code, output should be provided as a plain numpy array."

            # Ensure the result came out in volumina order
            assert self.operator.Output.meta.axistags == vigra.defaultAxistags(
                "tzyxc")

            # Check the data
            vresult = result.view(vigra.VigraArray)
            vresult.axistags = self.operator.Output.meta.axistags
            reorderedInput = self.inArray.withAxes(
                *[tag.key for tag in self.operator.Output.meta.axistags])
            assert numpy.all(
                vresult == reorderedInput[roiToSlice(roi[0], roi[1])])
Example #46
        def test_Roi_default_order(self):
            for i in range(self.tests):
                self.prepareVolnOp()
                shape = self.operator.outputs["output"].meta.shape
                roi = [None, None]
                roi[1] = [
                    numpy.random.randint(2, s) if s != 1 else 1 for s in shape
                ]
                roi[0] = [
                    numpy.random.randint(0, roi[1][i]) if s != 1 else 0
                    for i, s in enumerate(shape)
                ]
                roi[0] = TinyVector(roi[0])
                roi[1] = TinyVector(roi[1])
                result = self.operator.outputs["output"](roi[0], roi[1]).wait()
                logger.debug(
                    '------------------------------------------------------')
                logger.debug("self.array.shape = " + str(self.array.shape))
                logger.debug("type(input) == " +
                             str(type(self.operator.input.value)))
                logger.debug("input.shape == " +
                             str(self.operator.input.meta.shape))
                logger.debug("Input Tags:")
                logger.debug(str(self.operator.inputs['input'].meta.axistags))
                logger.debug("Output Tags:")
                logger.debug(str(self.operator.output.meta.axistags))
                logger.debug("roi= " + str(roi))
                logger.debug("type(result) == " + str(type(result)))
                logger.debug("result.shape == " + str(result.shape))
                logger.debug(
                    '------------------------------------------------------')

                # Check the shape
                assert len(result.shape) == 5

                # Ensure the result came out in volumina order
                assert self.operator.outputs[
                    "output"].meta.axistags == vigra.defaultAxistags('txyzc')

                # Check the data
                vresult = result.view(vigra.VigraArray)
                vresult.axistags = self.operator.outputs[
                    "output"].meta.axistags
                reorderedInput = self.inArray.withAxes(*[
                    tag.key
                    for tag in self.operator.outputs["output"].meta.axistags
                ])
                assert numpy.all(
                    vresult == reorderedInput[roiToSlice(roi[0], roi[1])])
Example #47
    def execute(self, slot, subindex, rroi, result):
        key = roiToSlice(rroi.start, rroi.stop)
        index = subindex[0]
        #print "SLICER: key", key, "indexes[0]", indexes[0], "result", result.shape
        start, stop = sliceToRoi(key, self.outputs["Slices"][index].meta.shape)

        start = list(start)
        stop = list(stop)

        flag = self.inputs["AxisFlag"].value
        indexAxis = self.inputs["Input"].meta.axistags.index(flag)

        start.insert(indexAxis, index)
        stop.insert(indexAxis, index)

        newKey = roiToSlice(numpy.array(start), numpy.array(stop))

        ttt = self.inputs["Input"][newKey].wait()

        writeKey = [slice(None, None, None) for k in key]
        writeKey.insert(indexAxis, 0)
        writeKey = tuple(writeKey)

        return ttt[writeKey]  #+ (0,)]
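
The slicer above converts a roi over the reduced output into an input roi by inserting the slice index on the removed axis, and then drops the singleton axis again when returning the data. A NumPy-only sketch of that bookkeeping:

import numpy as np

def roi_to_slice(start, stop):
    return tuple(slice(a, b) for a, b in zip(start, stop))

data = np.arange(2 * 4 * 5).reshape(2, 4, 5)    # input with the sliced axis first
axis_index = 0                                   # axis that the slicer removes
index = 1                                        # which slice of that axis to serve

# roi requested against the reduced (4, 5) output
start, stop = [0, 0], [4, 5]
start.insert(axis_index, index)
stop.insert(axis_index, index + 1)
fetched = data[roi_to_slice(start, stop)]        # shape (1, 4, 5)

# Drop the singleton axis again before handing the data back
write_key = [slice(None)] * 2
write_key.insert(axis_index, 0)
result = fetched[tuple(write_key)]
assert (result == data[index]).all()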
Example #48
    def _copyData(self, roi, destination, block_starts):
        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug("Copying data from {} blocks...".format(
            len(block_starts)))
        for block_start in block_starts:
            entire_block_roi = getBlockBounds(self.Output.meta.shape,
                                              self._blockshape, block_start)

            # This block's portion of the roi
            intersecting_roi = getIntersection((roi.start, roi.stop),
                                               entire_block_roi)

            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(
                intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(
                intersecting_roi, block_start)

            # Copy from block to destination
            dataset = self._getBlockDataset(entire_block_roi)
            destination[roiToSlice(
                *destination_relative_intersection)] = dataset[roiToSlice(
                    *block_relative_intersection)]
Example #49
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start,roi.stop)

        self.lock.acquire()
        assert(self.inputs["eraser"].ready() == True and self.inputs["shape"].ready() == True), "OpDenseSparseArray:  One of the necessary input slots is not ready: shape: %r, eraser: %r" % (self.inputs["eraser"].ready(), self.inputs["shape"].ready())
        if slot.name == "Output":
            result[:] = self._denseArray[key]
        elif slot.name == "nonzeroValues":
            result[0] = numpy.array(list(self._sparseNZ.values()))
        elif slot.name == "nonzeroCoordinates":
            result[0] = numpy.array(list(self._sparseNZ.keys()))
        elif slot.name == "maxLabel":
            result[0] = self._maxLabel
        self.lock.release()
        return result
Example #50
 def execute(self, slot, subindex, roi, destination):
     if slot == self.Output:
         destination[:] = self._cache[roiToSlice(roi.start, roi.stop)]
     elif slot == self.MaxLabelValue:
         # FIXME: Don't hard-code this
         destination[0] = 2
     elif slot == self.NonzeroBlocks:
         # Only one block, the bounding box for all non-zero values.
         # This is efficient if the labels are very close to eachother,
         #  but slow if the labels are far apart.
         nonzero_coords = numpy.nonzero(self._cache)
         if len(nonzero_coords) > 0 and len(nonzero_coords[0]) > 0:
              bounding_box_start = numpy.array([numpy.min(c) for c in nonzero_coords])
              bounding_box_stop = 1 + numpy.array([numpy.max(c) for c in nonzero_coords])
             destination[0] = [
                 roiToSlice(bounding_box_start, bounding_box_stop)
             ]
         else:
             destination[0] = []
     else:
         assert False, "Unknown output slot: {}".format(slot.name)
     return destination
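
The NonzeroBlocks output above reports a single roi: the bounding box of all nonzero values. A NumPy-only sketch of that bounding-box computation:

import numpy as np

def roi_to_slice(start, stop):
    return tuple(slice(a, b) for a, b in zip(start, stop))

labels = np.zeros((10, 10), dtype=np.uint8)
labels[2, 3] = 1
labels[7, 5] = 2

nonzero_coords = np.nonzero(labels)
bounding_box_start = np.array([c.min() for c in nonzero_coords])
bounding_box_stop = 1 + np.array([c.max() for c in nonzero_coords])
bbox_slicing = roi_to_slice(bounding_box_start, bounding_box_stop)
assert labels[bbox_slicing].shape == (6, 3)   # rows 2..7 and columns 3..5, inclusive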
Example #51
    def _executeCurrentFragmentSegmentation(self, roi, result):
        # Start with the original raveler object
        self.CurrentRavelerObject(roi.start, roi.stop).writeInto(result).wait()

        lut = self._opFragmentSetLutCache.Output[:].wait()

        slicing = roiToSlice( roi.start[1:4], roi.stop[1:4] )
        a = result[0,...,0]
        b = lut[self._mst.regionVol[slicing]] # (Advanced indexing)

        # Use bitwise_and instead of numpy.where to avoid the temporary caused by a == 0
        #a[:] = numpy.where( a == 0, 0, b )
        assert self.CurrentFragmentSegmentation.meta.dtype == numpy.uint8, "This code assumes uint8 as the dtype!"
        a[:] *= 0xFF # Assumes uint8
        numpy.bitwise_and( a, b, out=a )
        return result
Example #52
    def execute(self, slot, subindex, roi, result):
        with self._lock:
            if self.cache is None:
                shape = self.Input.meta.shape
                # self.blockShape has None in the last dimension to indicate that it should not be
                # handled block-wise. None is replaced with the image shape in the respective axis.
                fullBlockShape = []
                for u, v in zip(self.blockShape.value, shape):
                    if u is not None:
                        fullBlockShape.append(u)
                    else:
                        fullBlockShape.append(v)
                fullBlockShape = numpy.array(fullBlockShape,
                                             dtype=numpy.float64)

                # data = self.inputs["Input"][:].wait()
                # split up requests into blocks

                numBlocks = numpy.ceil(shape / fullBlockShape).astype("int")
                blockCache = numpy.ndarray(shape=numpy.prod(numBlocks),
                                           dtype=self.Output.meta.dtype)
                pool = RequestPool()
                # blocks holds the different roi keys for each of the blocks
                blocks = itertools.product(
                    *[list(range(i)) for i in numBlocks])
                blockKeys = []
                for b in blocks:
                    start = b * fullBlockShape
                    stop = b * fullBlockShape + fullBlockShape
                    stop = numpy.min(numpy.vstack((stop, shape)), axis=0)
                    blockKey = roiToSlice(start, stop)
                    blockKeys.append(blockKey)

                fun = self.inputs["Function"].value

                def predict_block(i):
                    data = self.Input[blockKeys[i]].wait()
                    blockCache[i] = fun(data)

                for i in range(len(blockCache)):
                    pool.request(partial(predict_block, i))

                pool.wait()
                pool.clean()

                self.cache = [fun(blockCache)]
            return self.cache
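
The operator above splits the input shape into blocks, applies the user-supplied function to each block, and then applies it once more to the per-block results. A NumPy-only sketch of that blockwise reduction; note that the final step is only valid for functions such as max, min, or sum:

import itertools
import numpy as np

def roi_to_slice(start, stop):
    return tuple(slice(int(a), int(b)) for a, b in zip(start, stop))

data = np.random.random((10, 10))
block_shape = np.array((4, 4))
fun = np.max                                       # stand-in for the "Function" slot value

num_blocks = np.ceil(np.array(data.shape) / block_shape).astype(int)
block_results = []
for b in itertools.product(*[range(n) for n in num_blocks]):
    start = np.array(b) * block_shape
    stop = np.minimum(start + block_shape, data.shape)
    block_results.append(fun(data[roi_to_slice(start, stop)]))

# Combining the per-block results reproduces the global result for max/min/sum.
assert fun(block_results) == fun(data)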
Example #53
    def _executeBlockwiseRegionFeatures(self, roi, destination):
        """
        Provide data for the BlockwiseRegionFeatures slot.
        Note: Each block produces a single element of this slot's output.  Construct requested roi coordinates accordingly.
              e.g. if block_shape is (1,10,10,10,1), the features for the block starting at 
                   (1,20,30,40,5) should be requested via roi [(1,2,3,4,5),(2,3,4,5,6)]
        
        Note: It is assumed that you will request these features for debug purposes, AFTER requesting the prediction image.
              Therefore, it is considered an error to request features that are not already computed.
        """
        axiskeys = self.RawImage.meta.getAxisKeys()
        # Find the corresponding block start coordinates
        block_shape = self._getFullShape(self.BlockShape3dDict.value)
        pixel_roi = numpy.array(block_shape) * (roi.start, roi.stop)
        block_starts = getIntersectingBlocks(block_shape, pixel_roi)
        block_starts = list(map(tuple, block_starts))

        # TODO: Parallelize this?
        for block_start in block_starts:
            assert block_start in self._blockPipelines, "Not allowed to request region features for blocks that haven't yet been processed."  # See note above

            # Discard spatial axes to get (t,c) index for region slot roi
            tagged_block_start = list(zip(axiskeys, block_start))
            block_start_tc = [v for (k, v) in tagged_block_start if k in 'tc']
            block_roi_tc = (block_start_tc,
                            block_start_tc + numpy.array([1, 1]))
            block_roi_t = (block_roi_tc[0][:-1], block_roi_tc[1][:-1])

            destination_start = numpy.array(
                block_start) // block_shape - roi.start
            destination_stop = destination_start + numpy.array(
                [1] * len(axiskeys))

            opBlockPipeline = self._blockPipelines[block_start]
            req = opBlockPipeline.BlockwiseRegionFeatures(*block_roi_t)
            destination_without_channel = destination[roiToSlice(
                destination_start, destination_stop)]
            destination_with_channel = destination_without_channel[
                ..., block_roi_tc[0][-1]:block_roi_tc[1][-1]]
            req.writeInto(destination_with_channel)
            req.wait()

        return destination
Example #54
    def execute(self, slot, subindex, roi, result):
        key = roiToSlice(roi.start, roi.stop)
        index = subindex[0]
        if slot.name == "NonzeroLabelBlocks":
            # Split into 10 chunks
            blocks = []
            slicing = [slice(0, s) for s in self.dataShape]
            for i in range(10):
                slicing[2] = slice(i*10, (i+1)*10)
                if not (self._data[index][tuple(slicing)] == 0).all():
                    blocks.append( list(slicing) )

            result[0] = blocks
        if slot.name == "LabelImages":
            result[...] = self._data[index][key]
        if slot.name == "PredictionProbabilities":
            result[...] = self.predictionData[key]
Example #55
    def testBasic(self):
        tiled_volume = TiledVolume(self.data_setup.VOLUME_DESCRIPTION_FILE)
        roi = numpy.array([(10, 150, 100), (30, 550, 550)])
        result_out = numpy.zeros(roi[1] - roi[0],
                                 dtype=tiled_volume.description.dtype)
        tiled_volume.read(roi, result_out)

        ref_path_comp = PathComponents(self.data_setup.REFERENCE_VOL_PATH)
        with h5py.File(ref_path_comp.externalPath, "r") as f:
            ref_data = f[ref_path_comp.internalPath][:]

        expected = ref_data[roiToSlice(*roi)]

        # numpy.save('/tmp/expected.npy', expected)
        # numpy.save('/tmp/result_out.npy', result_out)

        assert (expected == result_out).all()
Example #56
    def test3(self):
        # Generate a random dataset and see if it we get the right masking from the operator.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)

        # Provide input read all output.
        self.operator_identity.Input.setValue(numpy.zeros_like(data))
        output = self.operator_identity.Output[None].wait()

        assert (output == 0).all()

        # Try setInSlot
        data_shape_roi = roiFromShape(data.shape)
        data_shape_slice = roiToSlice(*data_shape_roi)
        self.operator_identity.Input[data_shape_slice] = data
        output = self.operator_identity.Output[None].wait()

        assert (data == output).all()
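
The setInSlot test above writes the whole array back through a full-shape slicing. A NumPy-only sketch of the roiFromShape / roiToSlice round trip it relies on (both helpers below are illustrative stand-ins):

import numpy as np

def roi_from_shape(shape):
    return (np.zeros(len(shape), dtype=int), np.array(shape))

def roi_to_slice(start, stop):
    return tuple(slice(a, b) for a, b in zip(start, stop))

data = np.random.random((4, 5, 6, 7, 3)).astype(np.float32)
full_slicing = roi_to_slice(*roi_from_shape(data.shape))

target = np.zeros_like(data)
target[full_slicing] = data        # analogous to writing through Input[data_shape_slice]
assert (target == data).all()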
Example #57
    def execute(self, slot, subindex, roi, result):
        assert slot == self.Output
        # Start with the original raveler object
        self.BodyMask(roi.start, roi.stop).writeInto(result).wait()

        lut = self.FragmentLut[:].wait()
        mst = self.MST.value

        slicing = roiToSlice( roi.start[1:4], roi.stop[1:4] )
        a = result[0,...,0]
        b = lut[mst.regionVol[slicing]] # (Advanced indexing)

        # Use bitwise_and instead of numpy.where to avoid the temporary caused by a == 0
        #a[:] = numpy.where( a == 0, 0, b )
        assert result.dtype == numpy.uint8, "This code assumes uint8 as the dtype!"
        a[:] *= 0xFF # Assumes uint8
        numpy.bitwise_and( a, b, out=a )
        return result
Example #58
def export_to_tiles(volume, tile_size, output_dir, print_progress=True):
    """
    volume: The volume to export (either hdf5 dataset or numpy array).  Must be 3D.
    tile_size: The width of the tiles to generate
    output_dir: The directory to dump the tiles to.
    """
    assert len(volume.shape) == 3

    tile_blockshape = (1, tile_size, tile_size)
    tile_starts = getIntersectingBlocks(tile_blockshape,
                                        roiFromShape(volume.shape))

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger.info("Writing {} tiles ...".format(len(tile_starts)))
    for tile_start in tile_starts:
        tile_roi = getBlockBounds(volume.shape, tile_blockshape, tile_start)

        if print_progress:
            sys.stdout.write("Tile: {} ".format(tile_roi))
            sys.stdout.flush()

        tile_data = volume[roiToSlice(*tile_roi)]
        tile_data = vigra.taggedView(tile_data, 'zyx')

        if print_progress:
            sys.stdout.write('reading... ')
            sys.stdout.flush()

        tile_name = 'tile_z{:05}_y{:05}_x{:05}.png'.format(*tile_start)
        output_path = os.path.join(output_dir, tile_name)

        if print_progress:
            sys.stdout.write('writing... ')
            sys.stdout.flush()

        vigra.impex.writeImage(tile_data[0], output_path, dtype='NATIVE')

        if print_progress:
            sys.stdout.write('done.\n')
            sys.stdout.flush()

    logger.info("TILES COMPLETE.")
Example #59
    def autoSeedBackground(cls, laneView, foreground_label):
        # Seed the entire image with background labels, except for the individual label in question
        # To save memory, we'll do this in blocks instead of all at once

        volume_shape = laneView.RavelerLabels.meta.shape
        volume_roi = roiFromShape( volume_shape )
        block_shape = (OpSplitBodyCarving.BLOCK_SIZE,) * len( volume_shape ) 
        block_shape = numpy.minimum( block_shape, volume_shape )
        block_starts = getIntersectingBlocks( block_shape, volume_roi )

        logger.debug("Auto-seeding {} blocks for label".format( len(block_starts), foreground_label ))
        for block_index, block_start in enumerate(block_starts):
            block_roi = getBlockBounds( volume_shape, block_shape, block_start )
            label_block = laneView.RavelerLabels(*block_roi).wait()
            background_block = numpy.where( label_block == foreground_label, 0, 1 )
            background_block = numpy.asarray( background_block, numpy.float32 ) # Distance transform requires float
            if (background_block == 0.0).any():
                # We need to leave a small border between the background seeds and the object membranes
                background_block_view = background_block.view( vigra.VigraArray )
                background_block_view.axistags = copy.copy( laneView.RavelerLabels.meta.axistags )
                
                background_block_view_4d = background_block_view.bindAxis('t', 0)
                background_block_view_3d = background_block_view_4d.bindAxis('c', 0)
                
                distance_transformed_block = vigra.filters.distanceTransform3D(background_block_view_3d, background=False)
                distance_transformed_block = distance_transformed_block.astype( numpy.uint8 )
                
                # Create a 'hull' surrounding the foreground, but leave some space.
                background_seed_block = (distance_transformed_block == OpSplitBodyCarving.SEED_MARGIN)
                background_seed_block = background_seed_block.astype(numpy.uint8) * 1 # (In carving, background is label 1)

#                # Make the hull VERY sparse to avoid over-biasing graph cut toward the background class
#                # FIXME: Don't regenerate this random block on every loop iteration
#                rand_bytes = numpy.random.randint(0, 1000, background_seed_block.shape)
#                background_seed_block = numpy.where( rand_bytes < 1, background_seed_block, 0 )
#                background_seed_block = background_seed_block.view(vigra.VigraArray)
#                background_seed_block.axistags = background_block_view_3d.axistags
                
                axisorder = laneView.RavelerLabels.meta.getTaggedShape().keys()
                
                logger.debug("Writing backgound seeds: {}/{}".format( block_index, len(block_starts) ))
                laneView.WriteSeeds[ roiToSlice( *block_roi ) ] = background_seed_block.withAxes(*axisorder)
            else:
                logger.debug("Skipping all-background block: {}/{}".format( block_index, len(block_starts) ))
Example #60
    def _execute_Output_impl(self, request_roi, result):
        request_roi = self._standardize_roi(*request_roi)
        with self._lock:
            block_roi = self._get_containing_block_roi(request_roi)
            if block_roi is not None:
                # Data is already in the cache. Just extract it.
                block_relative_roi = numpy.array(request_roi) - block_roi[0]
                self.Output.stype.copy_data(
                    result, self._block_data[block_roi][roiToSlice(
                        *block_relative_roi)])
                return

        if self.Input.meta.dontcache:
            # Data isn't in the cache, but we don't want to cache it anyway.
            self.Input(*request_roi).writeInto(result).block()
            return

        # Data isn't in the cache, so request it and cache it
        self._fetch_and_store_block(request_roi, out=result)
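
The cache above first checks whether some stored block fully contains the requested roi. A small NumPy-only sketch of that containment test; the helper names are illustrative:

import numpy as np

def contains(outer, inner):
    # True if the outer roi fully contains the inner roi.
    return (np.all(np.array(outer[0]) <= inner[0]) and
            np.all(np.array(outer[1]) >= inner[1]))

stored_rois = [((0, 0), (20, 20)), ((20, 0), (40, 20))]
request_roi = ((5, 5), (15, 15))

containing = [roi for roi in stored_rois if contains(roi, request_roi)]
assert containing == [((0, 0), (20, 20))]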