Code Example #1
    def testSetInSlot(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((100, 200, 150), dtype=numpy.float32).sum(0)
        sampleData = sampleData.view( vigra.VigraArray )
        sampleData.axistags = vigra.defaultAxistags('xyz')
        
        graph = Graph()
        opData = OpArrayPiper( graph=graph )
        opData.Input.setValue( sampleData )
        
        op = OpCompressedCache( parent=None, graph=graph )
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue( [100, 75, 50] )
        op.Input.connect( opData.Output )
        
        assert op.Output.ready()
        
        slicing = numpy.s_[ 0:100, 0:75, 0:50 ]
        expectedData = numpy.ones( slicing2shape(slicing), dtype=int )

        # This is what we're testing.
        #logger.debug("Forcing external data...")
        op.Input[slicing] = expectedData
        
        #logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()
        
        #logger.debug("Checking data...")    
        assert (readData == expectedData).all(), "Incorrect output!"
Code Example #2
    def testDirtyPropagation(self):
        g = Graph()
        vol = np.asarray(
            [[0, 0, 0, 0],
             [0, 0, 1, 1],
             [0, 1, 0, 1],
             [0, 1, 0, 1]], dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags='xy').withAxes(*'xyz')

        chunkShape = (2, 2, 1)

        opCache = OpCompressedCache(graph=g)
        opCache.Input.setValue(vol)
        opCache.BlockShape.setValue(chunkShape)

        op = OpLazyCC(graph=g)
        op.Input.connect(opCache.Output)
        op.ChunkShape.setValue(chunkShape)

        out1 = op.Output[:2, :2].wait()
        assert np.all(out1 == 0)

        opCache.Input[0:1, 0:1, 0:1] = np.asarray([[[1]]], dtype=np.uint8)

        out2 = op.Output[:1, :1].wait()
        assert np.all(out2 > 0)
Code Example #3
    def setupOutputs(self):
        self._disconnectInternals()

        # we need a new cache
        cache = OpCompressedCache(parent=self)
        cache.name = self.name + "WrappedCache"

        # connect cache outputs
        self.CleanBlocks.connect(cache.CleanBlocks)
        self.OutputHdf5.connect(cache.OutputHdf5)
        self._op2.Input.connect(cache.Output)

        # connect cache inputs
        cache.InputHdf5.connect(self.InputHdf5)
        cache.Input.connect(self._op1.Output)

        # set the cache block shape
        tagged_shape = self._op1.Output.meta.getTaggedShape()
        tagged_shape['t'] = 1
        tagged_shape['c'] = 1
        cacheshape = [tagged_shape[k] for k in 'xyzct']  # a list (not a map iterator), so numpy.minimum below works on Python 3
        if _labeling_impl == "lazy":
            #HACK hardcoded block shape
            blockshape = numpy.minimum(cacheshape, 256)
        else:
            # use full spatial volume if not lazy
            blockshape = cacheshape
        cache.BlockShape.setValue(tuple(blockshape))

        self._cache = cache
Code Example #4
    def testHDF5(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((150, 250, 150), dtype=numpy.float32).sum(0)
        sampleData = sampleData.view( vigra.VigraArray )
        sampleData.axistags = vigra.defaultAxistags('xyz')

        graph = Graph()
        opData = OpArrayPiper( graph=graph )
        opData.Input.setValue( sampleData )

        op = OpCompressedCache( parent=None, graph=graph )
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue( [75, 125, 150] )
        op.Input.connect( opData.Output )

        assert op.OutputHdf5.ready()

        slicing = numpy.s_[ 0:75, 125:250, 0:150 ]
        slicing_str = str([list(_) for _ in zip(*[[_.start, _.stop]  for _ in slicing])])
        expectedData = sampleData[slicing].view(numpy.ndarray)

        slicing_2 = numpy.s_[ 0:75, 0:125, 0:150 ]
        expectedData_2 = expectedData[slicing_2].view(numpy.ndarray)

        #logger.debug("Requesting data...")
        tempdir = tempfile.mkdtemp()

        try:
            with h5py.File(os.path.join(tempdir, "data.h5"), "w") as h5_file:
                op.OutputHdf5[slicing].writeInto(h5_file).wait()

                assert slicing_str in h5_file, "Missing dataset!"
                assert (h5_file[slicing_str][()] == expectedData).all(), "Incorrect output!"

            with h5py.File(os.path.join(tempdir, "data.h5"), "r") as h5_file:
                graph = Graph()
                opData = OpArrayPiper( graph=graph )
                opData.Input.meta.axistags = vigra.AxisTags('xyz')
                opData.Input.setValue( numpy.empty_like(expectedData_2) )

                op = OpCompressedCache( parent=None, graph=graph )
                op.InputHdf5.meta.axistags = vigra.AxisTags('xyz')
                op.InputHdf5.meta.shape = (75, 125, 150)
                #logger.debug("Setting block shape...")
                op.BlockShape.setValue( [75, 125, 150] )
                op.Input.connect( opData.Output )

                op.InputHdf5[slicing_2] = h5_file[slicing_str]

                result = op.Output[slicing_2].wait()

                assert (result == expectedData_2).all(), "Incorrect output!"
        finally:
            shutil.rmtree(tempdir)
Code Example #5
    def __init__(self, *args, **kwargs):
        super( OpSplitBodySupervoxelExport, self ).__init__(*args, **kwargs)

        # HACK: Be sure that the output slots are resized if the raveler body list changes
        self.AnnotationBodyIds.notifyDirty( bind(self._setupOutputs) )

        # Prepare a set of OpSelectLabels for easy access to raveler object masks
        self._opSelectLabel = OperatorWrapper( OpSelectLabel, parent=self, broadcastingSlotNames=['Input'] )
        self._opSelectLabel.Input.connect( self.RavelerLabels )
        self.EditedRavelerBodies.connect( self._opSelectLabel.Output )

        # Mask in the body of interest
        self._opMaskedSelect = OperatorWrapper( OpMaskedSelectUint32, parent=self, broadcastingSlotNames=['Input'] )
        self._opMaskedSelect.Input.connect( self.Supervoxels )
        self._opMaskedSelect.Mask.connect( self._opSelectLabel.Output )
        self.MaskedSupervoxels.connect( self._opMaskedSelect.Output )        

        # Must run CC before filter, to ensure that discontiguous labels can't avoid the filter.
        self._opRelabelMaskedSupervoxels = OperatorWrapper( OpVigraLabelVolume, parent=self )
        self._opRelabelMaskedSupervoxels.Input.connect( self._opMaskedSelect.Output )
        
        self._opRelabeledMaskedSupervoxelCaches = OperatorWrapper( OpCompressedCache, parent=self )
        self._opRelabeledMaskedSupervoxelCaches.Input.connect( self._opRelabelMaskedSupervoxels.Output )

        # Filter out the small CC to eliminate tiny pieces of supervoxels that overlap the mask boundaries
        self._opSmallLabelFilter = OperatorWrapper( OpFilterLabels, parent=self, broadcastingSlotNames=['MinLabelSize'] )
        self._opSmallLabelFilter.MinLabelSize.setValue( 10 )
        self._opSmallLabelFilter.Input.connect( self._opRelabeledMaskedSupervoxelCaches.Output )

        self._opSmallLabelFilterCaches = OperatorWrapper( OpCompressedCache, parent=self )
        self._opSmallLabelFilterCaches.Input.connect( self._opSmallLabelFilter.Output )
        self.FilteredMaskedSupervoxels.connect( self._opSmallLabelFilterCaches.Output )

        # Re-fill the holes left by the filter using region growing (with a mask)
        self._opMaskedWatersheds =  OperatorWrapper( OpMaskedWatershed, parent=self )
        self._opMaskedWatersheds.Input.connect( self.InputData )
        self._opMaskedWatersheds.Mask.connect( self._opSelectLabel.Output )
        self._opMaskedWatersheds.Seeds.connect( self._opSmallLabelFilterCaches.Output )

        # Cache is necessary because it ensures that the entire volume is used for watershed.
        self._opMaskedWatershedCaches = OperatorWrapper( OpCompressedCache, parent=self )
        self._opMaskedWatershedCaches.Input.connect( self._opMaskedWatersheds.Output )
        self.HoleFilledSupervoxels.connect( self._opMaskedWatershedCaches.Output )

        # Relabel the supervoxels in the mask to ensure contiguous supervoxels (after mask) and consecutive labels
        self._opRelabelMergedSupervoxels = OperatorWrapper( OpVigraLabelVolume, parent=self )
        self._opRelabelMergedSupervoxels.Input.connect( self._opMaskedWatershedCaches.Output )
        
        self._opRelabeledMergedSupervoxelCaches = OperatorWrapper( OpCompressedCache, parent=self )
        self._opRelabeledMergedSupervoxelCaches.Input.connect( self._opRelabelMergedSupervoxels.Output )
        self.RelabeledSupervoxels.connect( self._opRelabeledMergedSupervoxelCaches.Output )

        self._opAccumulateFinalImage = OpAccumulateFragmentSegmentations( parent=self )
        self._opAccumulateFinalImage.RavelerLabels.connect( self.RavelerLabels )
        self._opAccumulateFinalImage.FragmentSegmentations.connect( self._opRelabeledMergedSupervoxelCaches.Output )
        
        self._opFinalCache = OpCompressedCache( parent=self )
        self._opFinalCache.Input.connect( self._opAccumulateFinalImage.Output )
        self.FinalSupervoxels.connect( self._opFinalCache.Output )
        self.SupervoxelMapping.connect( self._opAccumulateFinalImage.Mapping )
Code Example #6
    def testBasic4d_txyc_masked(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((3, 200, 150, 2),
                                   dtype=numpy.float32).sum(0)
        sampleData = sampleData.view(numpy.ma.masked_array)
        sampleData.set_fill_value(numpy.float32(numpy.nan))
        sampleData[0] = numpy.ma.masked

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.meta.has_mask = True
        opData.Input.meta.axistags = vigra.defaultAxistags("txyc")
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        # logger.debug("Setting block shape...")
        op.BlockShape.setValue([1, 75, 50, 2])
        op.Input.connect(opData.Output)

        assert op.Output.ready()

        slicing = numpy.s_[1:3, 50:150, 75:150, 0:1]
        expectedData = sampleData[slicing]

        # logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()

        # logger.debug("Checking data...")
        assert ((readData == expectedData).all()
                and (readData.mask == expectedData.mask).all()
                and ((readData.fill_value == expectedData.fill_value)
                     | (numpy.isnan(readData.fill_value) & numpy.isnan(
                         expectedData.fill_value))).all()), "Incorrect output!"
Code Example #7
    def testBasic4d_txyc(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((3, 200, 150, 2),
                                   dtype=numpy.float32).sum(0)
        sampleData = sampleData.view(vigra.VigraArray)
        sampleData.axistags = vigra.defaultAxistags("txyc")

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        # logger.debug("Setting block shape...")
        op.BlockShape.setValue([1, 75, 50, 2])
        op.Input.connect(opData.Output)

        assert op.Output.ready()

        slicing = numpy.s_[1:3, 50:150, 75:150, 0:1]
        expectedData = sampleData[slicing].view(numpy.ndarray)

        # logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()

        # logger.debug("Checking data...")
        assert (readData == expectedData).all(), "Incorrect output!"
Code Example #8
    def testLazyness(self):
        g = Graph()
        vol = np.asarray(
            [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0]],
            dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags='xy').withAxes(*'xyz')
        chunkShape = (3, 3, 1)

        opCount = OpExecuteCounter(graph=g)
        opCount.Input.setValue(vol)

        opCache = OpCompressedCache(graph=g)
        opCache.Input.connect(opCount.Output)
        opCache.BlockShape.setValue(chunkShape)

        op = OpLazyCC(graph=g)
        op.Input.connect(opCache.Output)
        op.ChunkShape.setValue(chunkShape)

        out = op.Output[:3, :3].wait()
        n = 3
        assert opCount.numCalls <= n,\
            "Executed {} times (allowed: {})".format(opCount.numCalls,
                                                     n)
Code Example #9
    def __init__(self, *args, **kwargs):
        super(_OpCachedLabelImage, self).__init__(*args, **kwargs)

        # Hook up the labeler
        self._opLabelImage = OpLabelImage(parent=self)
        self._opLabelImage.Input.connect(self.Input)
        self._opLabelImage.BackgroundLabels.connect(self.BackgroundLabels)

        # Hook up the cache
        self._opCache = OpCompressedCache(parent=self)
        self._opCache.Input.connect(self._opLabelImage.Output)
        self._opCache.InputHdf5.connect(self.InputHdf5)

        # Hook up our output slots
        self.Output.connect(self._opCache.Output)
        self.CleanBlocks.connect(self._opCache.CleanBlocks)
        self.OutputHdf5.connect(self._opCache.OutputHdf5)
Code Example #10
File: opLabelVolume.py Project: thatcher/lazyflow
    def __init__(self, *args, **kwargs):
        super(OpLabelingABC, self).__init__(*args, **kwargs)
        self._cache = OpCompressedCache(parent=self)
        self._cache.name = "OpLabelVolume.OutputCache"
        self._cache.Input.connect(self.Output)
        self.CachedOutput.connect(self._cache.Output)
        self._cache.InputHdf5.connect(self.InputHdf5)
        self.OutputHdf5.connect(self._cache.OutputHdf5)
        self.CleanBlocks.connect(self._cache.CleanBlocks)
Code Example #11
    def __init__(self, *args, **kwargs):
        warn_deprecated(
            "OpCachedLabelImage is deprecated, use OpLabelVolume instead")
        super(OpCachedLabelImage, self).__init__(*args, **kwargs)

        # Hook up the labeler
        self._opLabelImage = OpLabelImage(parent=self)
        self._opLabelImage.Input.connect(self.Input)
        self._opLabelImage.BackgroundLabels.connect(self.BackgroundLabels)

        # Hook up the cache
        self._opCache = OpCompressedCache(parent=self)
        self._opCache.Input.connect(self._opLabelImage.Output)
        self._opCache.InputHdf5.connect(self.InputHdf5)

        # Hook up our output slots
        self.Output.connect(self._opCache.Output)
        self.CleanBlocks.connect(self._opCache.CleanBlocks)
        self.OutputHdf5.connect(self._opCache.OutputHdf5)
Code Example #12
    def testReasonableCompression(self):
        # compression should be *way* better than this
        expected_factor = 4.0
        graph = Graph()
        sampleData = numpy.zeros((10000, 1000), dtype=numpy.uint8)
        sampleData = vigra.taggedView(sampleData, axistags="xy")

        opData = OpArrayPiper(graph=graph)
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        op.Input.connect(opData.Output)

        assert op.Output.ready()
        assert op.usedMemory() == 0.0, "cache must not be filled at this point"
        op.Output[...].wait()
        assert op.usedMemory() <= (
            sampleData.nbytes / expected_factor
        ), "Compression of all-zeroes should be better than factor " "{}".format(expected_factor)
Code Example #13
    def testIdealBlockShapeChoice(self):
        sampleData = numpy.indices((150, 250, 350), dtype=numpy.float32).sum(0)
        sampleData = vigra.taggedView(sampleData, axistags='xyz')
        graph = Graph()
        opData = OpArrayPiper(graph=graph)

        # 1) input has valid ideal_blockshape, no block shape given
        print("1)")
        ideal = (33, 33, 33)
        opData.Input.meta.ideal_blockshape = ideal
        opData.Input.setValue(sampleData)
        op = OpCompressedCache(graph=graph)
        op.Input.connect(opData.Output)

        assert op.Output.ready()
        assert_array_equal(op.Output.meta.ideal_blockshape, op._blockshape)

        # 2) input has invalid ideal_blockshape
        print("2)")
        ideal = (33, 33)
        opData.Input.meta.ideal_blockshape = ideal
        opData.Input.setValue(None)
        opData.Input.setValue(sampleData)
        op = OpCompressedCache(graph=graph)
        op.Input.connect(opData.Output)

        assert op.Output.ready()
        assert len(op.Output.meta.ideal_blockshape) == 3

        # 3) input has valid ideal_blockshape, but BlockShape is incompatible
        print("3)")
        ideal = (33, 33, 33)
        blockShape = (50, 50, 50)
        opData.Input.meta.ideal_blockshape = ideal
        opData.Input.setValue(None)
        opData.Input.setValue(sampleData)
        op = OpCompressedCache(graph=graph)
        op.Input.connect(opData.Output)
        op.BlockShape.setValue(blockShape)

        assert op.Output.ready()
        assert_array_equal(op.Output.meta.ideal_blockshape, blockShape)
Code Example #14
    def testReasonableCompression(self):
        # compression should be *way* better than this
        expected_factor = 4.0
        graph = Graph()
        sampleData = numpy.zeros((10000, 1000), dtype=numpy.uint8)
        sampleData = vigra.taggedView(sampleData, axistags='xy')

        opData = OpArrayPiper(graph=graph)
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        op.Input.connect(opData.Output)

        assert op.Output.ready()
        assert op.usedMemory() == 0.0,\
            "cache must not be filled at this point"
        op.Output[...].wait()
        assert op.usedMemory() <= (sampleData.nbytes /expected_factor),\
            "Compression of all-zeroes should be better than factor "\
            "{}".format(expected_factor)
Code Example #15
def mkH5source(fname, gname):
    h5file = h5py.File(fname)
    source = OpStreamingHdf5Reader(graph=graph)
    source.Hdf5File.setValue(h5file)
    source.InternalPath.setValue(gname)

    op = OpCompressedCache(parent=None, graph=graph)
    op.BlockShape.setValue([100, 100, 100])
    op.Input.connect(source.OutputImage)

    return op.Output
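
Note that mkH5source refers to a module-level graph object that is not created inside the function, so a caller is expected to set one up first. Below is a minimal, hypothetical caller sketch; the HDF5 file name, internal dataset path, and 3D slicing are illustrative assumptions, not taken from the source.

# Hypothetical caller for mkH5source (sketch only).  A module-level `graph`
# must exist because mkH5source references it directly.
from lazyflow.graph import Graph

graph = Graph()
cachedSlot = mkH5source("/path/to/volume.h5", "exported_data")

# Pull one block through OpCompressedCache (assumes a 3D dataset).
block = cachedSlot[0:100, 0:100, 0:100].wait()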
Code Example #16
    def testSetInSlot_masked(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((100, 200, 150), dtype=numpy.float32).sum(0)
        sampleData = sampleData.view(numpy.ma.masked_array)
        sampleData.set_fill_value(numpy.float32(numpy.nan))
        sampleData[0] = numpy.ma.masked

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.meta.has_mask = True
        opData.Input.meta.axistags = vigra.defaultAxistags("xyz")
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        # logger.debug("Setting block shape...")
        op.BlockShape.setValue([100, 75, 50])
        op.Input.connect(opData.Output)

        assert op.Output.ready()

        slicing = numpy.s_[0:100, 0:75, 0:50]
        expectedData = numpy.ma.ones(slicing2shape(slicing), dtype=int)
        sampleData.set_fill_value(numpy.float32(numpy.nan))
        expectedData[0] = numpy.ma.masked

        # This is what we're testing.
        # logger.debug("Forcing external data...")
        op.Input[slicing] = expectedData

        # logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()

        # logger.debug("Checking data...")
        assert (
            (readData == expectedData).all()
            and (readData.mask == expectedData.mask).all()
            and (
                (readData.fill_value == expectedData.fill_value)
                | (numpy.isnan(readData.fill_value) & numpy.isnan(expectedData.fill_value))
            ).all()
        ), "Incorrect output!"
Code Example #17
    def testChangeBlockshape_masked(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((100, 200, 150), dtype=numpy.float32).sum(0)
        sampleData = sampleData.view(numpy.ma.masked_array)
        sampleData.set_fill_value(numpy.float32(numpy.nan))
        sampleData[0] = numpy.ma.masked

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.meta.has_mask = True
        opData.Input.meta.axistags = vigra.defaultAxistags('xyz')
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue([100, 75, 50])
        op.Input.connect(opData.Output)

        assert op.Output.ready()

        slicing = numpy.s_[0:100, 50:150, 75:150]
        expectedData = sampleData[slicing]

        #logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()

        #logger.debug("Checking data...")
        assert (readData == expectedData).all() and \
               (readData.mask == expectedData.mask).all() and \
               ((readData.fill_value == expectedData.fill_value) |
                (numpy.isnan(readData.fill_value) & numpy.isnan(expectedData.fill_value))).all(),\
            "Incorrect output!"

        # Now change the blockshape and the input and try again...
        sampleDataWithChannel = sampleData[..., None]
        opData.Input.meta.axistags = vigra.defaultAxistags('xyzc')
        opData.Input.setValue(sampleDataWithChannel)
        op.BlockShape.setValue([45, 33, 40, 1])

        assert op.Output.ready()

        slicing = numpy.s_[60:70, 50:110, 60:120, 0:1]
        expectedData = sampleDataWithChannel[slicing]

        #logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()

        #logger.debug("Checking data...")
        assert (readData == expectedData).all() and \
               (readData.mask == expectedData.mask).all() and \
               ((readData.fill_value == expectedData.fill_value) |
                (numpy.isnan(readData.fill_value) & numpy.isnan(expectedData.fill_value))).all(),\
            "Incorrect output!"
Code Example #18
    def __init__(self, *args, **kwargs):
        super(OpIpht, self).__init__(*args, **kwargs)
        self._opIpht = OpIphtNoCache(parent=self)
        self._opIpht.InputImage.connect(self.InputImage)
        self._opIpht.MinSize.connect(self.MinSize)
        self._opIpht.MaxSize.connect(self.MaxSize)
        self._opIpht.HighThreshold.connect(self.HighThreshold)
        self._opIpht.LowThreshold.connect(self.LowThreshold)
        self.Output.connect(self._opIpht.Output)

        self._opCache = OpCompressedCache(parent=self)
        self._opCache.Input.connect(self._opIpht.Output)
        self.CachedOutput.connect(self._opCache.Output)
Code Example #19
    def testDirtyPropagation(self):
        g = Graph()
        vol = np.asarray([[0, 0, 0, 0], [0, 0, 1, 1], [0, 1, 0, 1], [0, 1, 0, 1]], dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags="yx").withAxes(*"zyx")

        chunkShape = (1, 2, 2)

        opCache = OpCompressedCache(graph=g)
        opCache.Input.setValue(vol)
        opCache.BlockShape.setValue(chunkShape)

        op = OpLazyCC(graph=g)
        op.Input.connect(opCache.Output)
        op.ChunkShape.setValue(chunkShape)

        out1 = op.Output[:, :2, :2].wait()
        assert np.all(out1 == 0)

        opCache.Input[0:1, 0:1, 0:1] = np.asarray([[[1]]], dtype=np.uint8)

        out2 = op.Output[:, :1, :1].wait()
        assert np.all(out2 > 0)
Code Example #20
    def testSetInSlot_masked(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((100, 200, 150), dtype=numpy.float32).sum(0)
        sampleData = sampleData.view(numpy.ma.masked_array)
        sampleData.set_fill_value(numpy.float32(numpy.nan))
        sampleData[0] = numpy.ma.masked

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.meta.has_mask = True
        opData.Input.meta.axistags = vigra.defaultAxistags('xyz')
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue([100, 75, 50])
        op.Input.connect(opData.Output)

        assert op.Output.ready()

        slicing = numpy.s_[0:100, 0:75, 0:50]
        expectedData = numpy.ma.ones(slicing2shape(slicing), dtype=int)
        sampleData.set_fill_value(numpy.float32(numpy.nan))
        expectedData[0] = numpy.ma.masked

        # This is what we're testing.
        #logger.debug("Forcing external data...")
        op.Input[slicing] = expectedData

        #logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()

        #logger.debug("Checking data...")
        assert (readData == expectedData).all() and \
               (readData.mask == expectedData.mask).all() and \
               ((readData.fill_value == expectedData.fill_value) |
                (numpy.isnan(readData.fill_value) & numpy.isnan(expectedData.fill_value))).all(),\
            "Incorrect output!"
Code Example #21
    def testMultiThread_masked(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((3, 100, 200, 150, 2),
                                   dtype=numpy.float32).sum(0)
        sampleData = sampleData.view(numpy.ma.masked_array)
        sampleData.set_fill_value(numpy.float32(numpy.nan))
        sampleData[0] = numpy.ma.masked

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.meta.has_mask = True
        opData.Input.meta.axistags = vigra.defaultAxistags("txyzc")
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        # logger.debug("Setting block shape...")
        op.BlockShape.setValue([1, 100, 75, 50, 2])
        op.Input.connect(opData.Output)

        assert op.Output.ready()

        slicing = numpy.s_[0:2, 0:100, 50:150, 75:150, 0:1]
        expectedData = sampleData[slicing]

        results = {}

        def readData(resultIndex):
            results[resultIndex] = op.Output[slicing].wait()

        threads = []
        for i in range(10):
            threads.append(
                threading.Thread(target=functools.partial(readData, i)))

        for th in threads:
            th.start()

        for th in threads:
            th.join()

        assert len(results) == len(threads), "Didn't get all results."

        # logger.debug("Checking data...")
        for i, data in list(results.items()):
            assert ((data == expectedData).all()
                    and (data.mask == expectedData.mask).all()
                    and ((data.fill_value == expectedData.fill_value)
                         | (numpy.isnan(data.fill_value)
                            & numpy.isnan(expectedData.fill_value))).all()
                    ), "Incorrect output for index {}".format(i)
Code Example #22
    def setupOutputs(self):
        self._disconnectInternals()

        # we need a new cache
        cache = OpCompressedCache(parent=self)
        cache.name = self.name + "WrappedCache"

        # connect cache outputs
        self.CleanBlocks.connect(cache.CleanBlocks)
        self.OutputHdf5.connect(cache.OutputHdf5)
        self._op2.Input.connect(cache.Output)

        # connect cache inputs
        cache.InputHdf5.connect(self.InputHdf5)
        cache.Input.connect(self._op1.Output)

        # set the cache block shape
        tagged_shape = self._op1.Output.meta.getTaggedShape()
        tagged_shape['t'] = 1
        tagged_shape['c'] = 1
        blockshape = [tagged_shape[k] for k in 'xyzct']
        cache.BlockShape.setValue(tuple(blockshape))

        self._cache = cache
Code Example #23
    def __init__(self, *args, **kwargs):

        super(OpObjectExtraction, self).__init__(*args, **kwargs)

        # internal operators
        #TODO BinaryImage is not binary in some workflows, could be made more
        # efficient
        self._opLabelVolume = OpLabelVolume(parent=self)
        self._opLabelVolume.name = "OpObjectExtraction._opLabelVolume"
        self._opRegFeats = OpCachedRegionFeatures(parent=self)
        self._opRegFeatsAdaptOutput = OpAdaptTimeListRoi(parent=self)
        self._opObjectCenterImage = OpObjectCenterImage(parent=self)

        # connect internal operators
        self._opLabelVolume.Input.connect(self.BinaryImage)
        self._opLabelVolume.InputHdf5.connect(self.LabelInputHdf5)
        self._opLabelVolume.Background.connect(self.BackgroundLabels)

        self._opRegFeats.RawImage.connect(self.RawImage)
        self._opRegFeats.LabelImage.connect(self._opLabelVolume.CachedOutput)
        self._opRegFeats.Features.connect(self.Features)
        self.RegionFeaturesCleanBlocks.connect(self._opRegFeats.CleanBlocks)

        self._opRegFeats.CacheInput.connect(self.RegionFeaturesCacheInput)

        self._opRegFeatsAdaptOutput.Input.connect(self._opRegFeats.Output)

        self._opObjectCenterImage.BinaryImage.connect(self.BinaryImage)
        self._opObjectCenterImage.RegionCenters.connect(
            self._opRegFeatsAdaptOutput.Output)

        self._opCenterCache = OpCompressedCache(parent=self)
        self._opCenterCache.name = "OpObjectExtraction._opCenterCache"
        self._opCenterCache.Input.connect(self._opObjectCenterImage.Output)

        # connect outputs
        self.LabelImage.connect(self._opLabelVolume.CachedOutput)
        self.ObjectCenterImage.connect(self._opCenterCache.Output)
        self.RegionFeatures.connect(self._opRegFeatsAdaptOutput.Output)
        self.BlockwiseRegionFeatures.connect(self._opRegFeats.Output)
        self.LabelOutputHdf5.connect(self._opLabelVolume.OutputHdf5)
        self.CleanLabelBlocks.connect(self._opLabelVolume.CleanBlocks)
        self.ComputedFeatureNames.connect(self.Features)

        # As soon as input data is available, check its constraints
        self.RawImage.notifyReady(self._checkConstraints)
        self.BinaryImage.notifyReady(self._checkConstraints)
Code Example #24
File: opCachedLabelImage.py Project: CVML/lazyflow
    def __init__(self, *args, **kwargs):
        super(_OpCachedLabelImage, self).__init__(*args, **kwargs)
        
        # Hook up the labeler
        self._opLabelImage = OpLabelImage( parent=self )
        self._opLabelImage.Input.connect( self.Input )
        self._opLabelImage.BackgroundLabels.connect( self.BackgroundLabels )

        # Hook up the cache
        self._opCache = OpCompressedCache( parent=self )
        self._opCache.Input.connect( self._opLabelImage.Output )
        self._opCache.InputHdf5.connect( self.InputHdf5 )
        
        # Hook up our output slots
        self.Output.connect( self._opCache.Output )
        self.CleanBlocks.connect( self._opCache.CleanBlocks )
        self.OutputHdf5.connect( self._opCache.OutputHdf5 )
Code Example #25
    def testCleanup(self):
        sampleData = numpy.indices((100, 200, 150), dtype=numpy.float32).sum(0)
        sampleData = vigra.taggedView(sampleData, axistags='xyz')

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(graph=graph)
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue([100, 75, 50])
        op.Input.connect(opData.Output)
        x = op.Output[...].wait()
        op.Input.disconnect()
        r = weakref.ref(op)
        del op
        gc.collect()
        assert r() is None, "OpCompressedCache was not cleaned up correctly"
Code Example #26
File: opCachedLabelImage.py Project: burcin/lazyflow
    def __init__(self, *args, **kwargs):
        warn_deprecated("OpCachedLabelImage is deprecated, use OpLabelVolume instead")
        super(OpCachedLabelImage, self).__init__(*args, **kwargs)
        
        # Hook up the labeler
        self._opLabelImage = OpLabelImage( parent=self )
        self._opLabelImage.Input.connect( self.Input )
        self._opLabelImage.BackgroundLabels.connect( self.BackgroundLabels )

        # Hook up the cache
        self._opCache = OpCompressedCache( parent=self )
        self._opCache.Input.connect( self._opLabelImage.Output )
        self._opCache.InputHdf5.connect( self.InputHdf5 )
        
        # Hook up our output slots
        self.Output.connect( self._opCache.Output )
        self.CleanBlocks.connect( self._opCache.CleanBlocks )
        self.OutputHdf5.connect( self._opCache.OutputHdf5 )
Code Example #27
    def testReconnectWithoutRequest(self):
        vol = numpy.zeros((200, 100, 50), dtype=numpy.float32)
        vol1 = vigra.taggedView(vol, axistags='xyz')
        vol2 = vigra.taggedView(vol, axistags='zyx').withAxes(*'xyz')
        graph = Graph()

        opData1 = OpArrayPiper(graph=graph)
        opData1.Input.setValue(vol1)

        op = OpCompressedCache(graph=graph)
        op.Input.connect(opData1.Output)
        op.BlockShape.setValue((200, 100, 10))
        out = op.Output[...].wait()

        op.BlockShape.setValue((50, 100, 10))

        # Older versions of OpCompressedCache threw an exception here because
        #  we tried to access the cache after changing the blockshape.
        # But in the current version, we claim that's okay.
        out = op.Output[...].wait()
Code Example #28
    def testChangeBlockshape(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((100, 200, 150), dtype=numpy.float32).sum(0)
        sampleData = sampleData.view(vigra.VigraArray)
        sampleData.axistags = vigra.defaultAxistags('xyz')

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue([100, 75, 50])
        op.Input.connect(opData.Output)

        assert op.Output.ready()

        slicing = numpy.s_[0:100, 50:150, 75:150]
        expectedData = sampleData[slicing].view(numpy.ndarray)

        #logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()

        #logger.debug("Checking data...")
        assert (readData == expectedData).all(), "Incorrect output!"

        # Now change the blockshape and the input and try again...
        sampleDataWithChannel = sampleData.withAxes(*'xyzc')
        opData.Input.setValue(sampleDataWithChannel)
        op.BlockShape.setValue([45, 33, 40, 1])

        assert op.Output.ready()

        slicing = numpy.s_[60:70, 50:110, 60:120, 0:1]
        expectedData = sampleDataWithChannel[slicing].view(numpy.ndarray)

        #logger.debug("Requesting data...")
        readData = op.Output[slicing].wait()

        #logger.debug("Checking data...")
        assert (readData == expectedData).all(), "Incorrect output!"
Code Example #29
    def testFree(self):
        sampleData = numpy.indices((100, 200, 150), dtype=numpy.float32).sum(0)
        sampleData = vigra.taggedView(sampleData, axistags='xyz')

        graph = Graph()
        opData = OpArrayPiperWithAccessCount(graph=graph)
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(graph=graph)
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue([100, 75, 50])
        op.Input.connect(opData.Output)

        op.Output[...].wait()
        mem = op.usedMemory()
        keys = [x[0] for x in op.getBlockAccessTimes()]
        key = keys[0]
        op.freeBlock(key)
        assert op.usedMemory() < mem
Code Example #30
    def testReportGeneration(self):
        graph = Graph()
        sampleData = numpy.random.randint(0, 256, size=(50, 50, 50))
        sampleData = sampleData.astype(numpy.uint8)
        sampleData = vigra.taggedView(sampleData, axistags='xyz')

        opData = OpArrayPiper(graph=graph)
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        op.BlockShape.setValue((25, 25, 25))
        op.Input.connect(opData.Output)

        before = time.time()
        assert op.Output.ready()
        assert op.usedMemory() == 0.0,\
            "cache must not be filled at this point"
        op.Output[...].wait()
        assert op.usedMemory() > 0.0,\
            "cache must contain data at this point"
        after = time.time()

        r = MemInfoNode()
        op.generateReport(r)
        # not sure how good this can be compressed, but the cache
        # should hold memory by now
        assert r.usedMemory > 0
        # check sanity of last access time
        assert r.lastAccessTime >= before, str(r.lastAccessTime)
        assert r.lastAccessTime <= after, str(r.lastAccessTime)
        assert r.fractionOfUsedMemoryDirty == 0.0

        opData.Input.setDirty((slice(0, 25), slice(0, 25), slice(0, 25)))
        assert op.fractionOfUsedMemoryDirty() < 1.0
        assert op.fractionOfUsedMemoryDirty() > 0

        opData.Input.setDirty(slice(None))
        assert op.fractionOfUsedMemoryDirty() == 1.0
Code Example #31
    def testReportGeneration(self):
        graph = Graph()
        sampleData = numpy.random.randint(0, 256, size=(50, 50, 50))
        sampleData = sampleData.astype(numpy.uint8)
        sampleData = vigra.taggedView(sampleData, axistags='xyz')

        opData = OpArrayPiper(graph=graph)
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        op.BlockShape.setValue((25, 25, 25))
        op.Input.connect(opData.Output)

        before = time.time()
        assert op.Output.ready()
        assert op.usedMemory() == 0.0,\
            "cache must not be filled at this point"
        op.Output[...].wait()
        assert op.usedMemory() > 0.0,\
            "cache must contain data at this point"
        after = time.time()

        r = MemInfoNode()
        op.generateReport(r)
        # not sure how good this can be compressed, but the cache
        # should hold memory by now
        assert r.usedMemory > 0
        # check sanity of last access time
        assert r.lastAccessTime >= before, str(r.lastAccessTime)
        assert r.lastAccessTime <= after, str(r.lastAccessTime)
        assert r.fractionOfUsedMemoryDirty == 0.0

        opData.Input.setDirty(
            (slice(0, 25), slice(0, 25), slice(0, 25)))
        assert op.fractionOfUsedMemoryDirty() < 1.0
        assert op.fractionOfUsedMemoryDirty() > 0

        opData.Input.setDirty(slice(None))
        assert op.fractionOfUsedMemoryDirty() == 1.0
Code Example #32
    def testReconnectWithoutRequest_masked(self):
        vol = numpy.ma.zeros((200, 100, 50), dtype=numpy.float32)
        vol.set_fill_value(numpy.float32(numpy.nan))
        vol[0] = numpy.ma.masked
        vol1 = vol
        vol2 = vol1.T
        graph = Graph()

        opData1 = OpArrayPiper(graph=graph)
        opData1.Input.meta.has_mask = True
        opData1.Input.meta.axistags = vigra.defaultAxistags('xyz')
        opData1.Input.setValue(vol1)

        op = OpCompressedCache(graph=graph)
        op.Input.connect(opData1.Output)
        op.BlockShape.setValue((200, 100, 10))
        out = op.Output[...].wait()

        assert (out == vol).all() and \
               (out.mask == vol.mask).all() and \
               ((out.fill_value == vol.fill_value) |
                (numpy.isnan(out.fill_value) & numpy.isnan(vol.fill_value))).all(),\
            "Incorrect output!"

        op.BlockShape.setValue((50, 100, 10))

        # Older versions of OpCompressedCache threw an exception here because
        #  we tried to access the cache after changing the blockshape.
        # But in the current version, we claim that's okay.
        out = op.Output[...].wait()

        assert (out == vol).all() and \
               (out.mask == vol.mask).all() and \
               ((out.fill_value == vol.fill_value) |
                (numpy.isnan(out.fill_value) & numpy.isnan(vol.fill_value))).all(),\
            "Incorrect output!"
Code Example #33
    def testFree(self):
        sampleData = numpy.indices((100, 200, 150), dtype=numpy.float32).sum(0)
        sampleData = vigra.taggedView(sampleData, axistags='xyz')
        
        graph = Graph()
        opData = OpArrayPiperWithAccessCount(graph=graph)
        opData.Input.setValue(sampleData)
        
        op = OpCompressedCache(graph=graph)
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue([100, 75, 50])
        op.Input.connect(opData.Output)

        op.Output[...].wait()
        mem = op.usedMemory()
        keys = [x[0] for x in op.getBlockAccessTimes()]
        key = keys[0]
        op.freeBlock(key)
        assert op.usedMemory() < mem
Code Example #34
    def __init__(self, *args, **kwargs):
        super(_OpThresholdTwoLevels, self).__init__(*args, **kwargs)

        self._opLowThresholder = OpPixelOperator(parent=self)
        self._opLowThresholder.Input.connect(self.InputImage)

        self._opHighThresholder = OpPixelOperator(parent=self)
        self._opHighThresholder.Input.connect(self.InputImage)

        self._opLowLabeler = OpLabelVolume(parent=self)
        self._opLowLabeler.Method.setValue(_labeling_impl)
        self._opLowLabeler.Input.connect(self._opLowThresholder.Output)

        self._opHighLabeler = OpLabelVolume(parent=self)
        self._opHighLabeler.Method.setValue(_labeling_impl)
        self._opHighLabeler.Input.connect(self._opHighThresholder.Output)

        self._opHighLabelSizeFilter = OpFilterLabels(parent=self)
        self._opHighLabelSizeFilter.Input.connect(self._opHighLabeler.Output)
        self._opHighLabelSizeFilter.MinLabelSize.connect(self.MinSize)
        self._opHighLabelSizeFilter.MaxLabelSize.connect(self.MaxSize)
        self._opHighLabelSizeFilter.BinaryOut.setValue(
            False)  # we do the binarization in opSelectLabels
        # this way, we get to display pretty colors

        self._opSelectLabels = OpSelectLabels(parent=self)
        self._opSelectLabels.BigLabels.connect(self._opLowLabeler.Output)
        self._opSelectLabels.SmallLabels.connect(
            self._opHighLabelSizeFilter.Output)

        # remove the remaining very large objects -
        # they might still be present in case a big object
        # was split into many small ones for the higher threshold
        # and they got reconnected again at lower threshold
        self._opFinalLabelSizeFilter = OpFilterLabels(parent=self)
        self._opFinalLabelSizeFilter.Input.connect(self._opSelectLabels.Output)
        self._opFinalLabelSizeFilter.MinLabelSize.connect(self.MinSize)
        self._opFinalLabelSizeFilter.MaxLabelSize.connect(self.MaxSize)
        self._opFinalLabelSizeFilter.BinaryOut.setValue(False)

        self._opCache = OpCompressedCache(parent=self)
        self._opCache.name = "_OpThresholdTwoLevels._opCache"
        self._opCache.InputHdf5.connect(self.InputHdf5)
        self._opCache.Input.connect(self._opFinalLabelSizeFilter.Output)

        # Connect our own outputs
        self.Output.connect(self._opFinalLabelSizeFilter.Output)
        self.CachedOutput.connect(self._opCache.Output)

        # Serialization outputs
        self.CleanBlocks.connect(self._opCache.CleanBlocks)
        self.OutputHdf5.connect(self._opCache.OutputHdf5)

        #self.InputChannel.connect( self._opChannelSelector.Output )

        # More debug outputs.  These all go through their own caches
        self._opBigRegionCache = OpCompressedCache(parent=self)
        self._opBigRegionCache.name = "_OpThresholdTwoLevels._opBigRegionCache"
        self._opBigRegionCache.Input.connect(self._opLowThresholder.Output)
        self.BigRegions.connect(self._opBigRegionCache.Output)

        self._opSmallRegionCache = OpCompressedCache(parent=self)
        self._opSmallRegionCache.name = "_OpThresholdTwoLevels._opSmallRegionCache"
        self._opSmallRegionCache.Input.connect(self._opHighThresholder.Output)
        self.SmallRegions.connect(self._opSmallRegionCache.Output)

        self._opFilteredSmallLabelsCache = OpCompressedCache(parent=self)
        self._opFilteredSmallLabelsCache.name = "_OpThresholdTwoLevels._opFilteredSmallLabelsCache"
        self._opFilteredSmallLabelsCache.Input.connect(
            self._opHighLabelSizeFilter.Output)
        self._opColorizeSmallLabels = OpColorizeLabels(parent=self)
        self._opColorizeSmallLabels.Input.connect(
            self._opFilteredSmallLabelsCache.Output)
        self.FilteredSmallLabels.connect(self._opColorizeSmallLabels.Output)
Code Example #35
    def __init__(self, *args, **kwargs):
        super( OpSplitBodyPostprocessing, self ).__init__(*args, **kwargs)

        # HACK: Be sure that the output slots are resized if the raveler body list changes
        self.EditedRavelerBodyList.notifyDirty( bind(self._setupOutputs) )

        # Prepare a set of OpSelectLabels for easy access to raveler object masks
        self._opSelectLabel = OperatorWrapper( OpSelectLabel, parent=self, broadcastingSlotNames=['Input'] )
        self._opSelectLabel.Input.connect( self.RavelerLabels )
        self.EditedRavelerBodies.connect( self._opSelectLabel.Output )

        # Prepare a set of OpFragmentSetLuts to compute the lut of each body's fragments
        self._opFragmentSetLuts = OperatorWrapper( OpFragmentSetLut, parent=self, 
                                                   broadcastingSlotNames=['MST', 'CurrentEditingFragment', 'Trigger'] )
        self._opFragmentSetLuts.MST.connect( self.MST )
        self._opFragmentSetLuts.CurrentEditingFragment.setValue("")

        # Prepare a set of Fragment image generators
        self._opFragments = OperatorWrapper( OpFragment, parent=self, broadcastingSlotNames=['MST'] )
        self._opFragments.BodyMask.connect( self._opSelectLabel.Output )
        self._opFragments.FragmentLut.connect( self._opFragmentSetLuts.Lut )
        self._opFragments.MST.connect( self.MST )
        
        # Cache the fragment segmentations for each body
        self._opFragmentCaches = OperatorWrapper( OpCompressedCache, parent=self )
        self._opFragmentCaches.Input.connect( self._opFragments.Output )
        self.FragmentedBodies.connect( self._opFragmentCaches.Output )
        
        # CC is performed on the cached output, in part to ensure that the entire block is used.
        self._opRelabelFragments = OperatorWrapper( OpVigraLabelVolume, parent=self )
        self._opRelabelFragments.Input.connect( self._opFragmentCaches.Output )
        
        self._opRelabeledFragmentCaches = OperatorWrapper( OpCompressedCache, parent=self )
        self._opRelabeledFragmentCaches.Input.connect( self._opRelabelFragments.Output )
        self.RelabeledFragments.connect( self._opRelabeledFragmentCaches.Output )

        # Filter the small CC objects from the (relabeled) fragment segmentations
        self._opSmallFragmentFilter = OperatorWrapper( OpFilterLabels, parent=self, broadcastingSlotNames=['MinLabelSize'] )
        self._opSmallFragmentFilter.MinLabelSize.setValue( 50 )
        self._opSmallFragmentFilter.Input.connect( self._opRelabeledFragmentCaches.Output )

        self._opFilteredFragmentCaches = OperatorWrapper( OpCompressedCache, parent=self )
        self._opFilteredFragmentCaches.Input.connect( self._opSmallFragmentFilter.Output )
        self.FilteredFragmentedBodies.connect( self._opFilteredFragmentCaches.Output )

        # Watershed to fill the holes created by filtering.
        # Use a masked watershed to ensure that the watersheds stay within the bounds of the body
        self._opMaskedWatersheds =  OperatorWrapper( OpMaskedWatershed, parent=self )
        self._opMaskedWatersheds.Input.connect( self.InputData )
        self._opMaskedWatersheds.Mask.connect( self._opSelectLabel.Output )
        self._opMaskedWatersheds.Seeds.connect( self._opSmallFragmentFilter.Output )

        # Cache is necessary because it ensures that the entire volume is used for watershed.
        self._opMaskedWatershedCaches = OperatorWrapper( OpCompressedCache, parent=self )
        self._opMaskedWatershedCaches.Input.connect( self._opMaskedWatersheds.Output )
        self.WatershedFilledBodies.connect( self._opMaskedWatershedCaches.Output )

        self._opAccumulateFinalImage = OpAccumulateFragmentSegmentations( parent=self )
        self._opAccumulateFinalImage.RavelerLabels.connect( self.RavelerLabels )
        self._opAccumulateFinalImage.FragmentSegmentations.connect( self.WatershedFilledBodies )
        
        self._opFinalCache = OpCompressedCache( parent=self )
        self._opFinalCache.Input.connect( self._opAccumulateFinalImage.Output )
        self.FinalSegmentation.connect( self._opFinalCache.Output )

        # Cache serialization slots
        self._opFinalCache.InputHdf5.connect( self.FinalSegmentationHdf5CacheInput )
        self.FinalSegmentationCleanBlocks.connect( self._opFinalCache.CleanBlocks )
        self.FinalSegmentationHdf5CacheOutput.connect( self._opFinalCache.OutputHdf5 )
Code Example #36
    def testHDF5_masked(self):
        logger.info("Generating sample data...")
        sampleData = numpy.indices((150, 250, 150), dtype=numpy.float32).sum(0)
        sampleData = sampleData.view(numpy.ma.masked_array)
        sampleData.set_fill_value(numpy.float32(numpy.nan))
        sampleData[0] = numpy.ma.masked

        graph = Graph()
        opData = OpArrayPiper(graph=graph)
        opData.Input.meta.axistags = vigra.defaultAxistags('xyz')
        opData.Input.meta.has_mask = True
        opData.Input.setValue(sampleData)

        op = OpCompressedCache(parent=None, graph=graph)
        #logger.debug("Setting block shape...")
        op.BlockShape.setValue([75, 125, 150])
        op.Input.connect(opData.Output)

        assert op.OutputHdf5.ready()

        slicing = numpy.s_[0:75, 125:250, 0:150]
        slicing_str = str(
            [list(_) for _ in zip(*[[_.start, _.stop] for _ in slicing])])
        expectedData = sampleData[slicing]

        slicing_2 = numpy.s_[0:75, 0:125, 0:150]
        expectedData_2 = expectedData[slicing_2]

        #logger.debug("Requesting data...")
        tempdir = tempfile.mkdtemp()

        try:
            with h5py.File(os.path.join(tempdir, "data.h5"), "w") as h5_file:
                op.OutputHdf5[slicing].writeInto(h5_file).wait()

                assert slicing_str in h5_file, "Missing dataset!"

                assert (h5_file[slicing_str]["data"][()] == expectedData).all() and \
                       (h5_file[slicing_str]["mask"][()] == expectedData.mask).all() and \
                       ((h5_file[slicing_str]["fill_value"][()] == expectedData.fill_value) |
                        (numpy.isnan(h5_file[slicing_str]["fill_value"][()]) & numpy.isnan(expectedData.fill_value))).all(),\
                    "Incorrect output!"

            with h5py.File(os.path.join(tempdir, "data.h5"), "r") as h5_file:
                graph = Graph()

                opData = OpArrayPiper(graph=graph)
                opData.Input.meta.axistags = vigra.AxisTags('xyz')
                opData.Input.meta.has_mask = True
                opData.Input.setValue(numpy.empty_like(expectedData_2))

                op = OpCompressedCache(parent=None, graph=graph)
                op.InputHdf5.meta.axistags = vigra.AxisTags('xyz')
                op.InputHdf5.meta.has_mask = True
                op.InputHdf5.meta.shape = (75, 125, 150)
                #logger.debug("Setting block shape...")
                op.BlockShape.setValue([75, 125, 150])
                op.Input.connect(opData.Output)

                op.InputHdf5[slicing_2] = h5_file[slicing_str]

                result = op.Output[slicing_2].wait()

                assert (result == expectedData).all() and \
                       (result.mask == expectedData.mask).all() and \
                       ((result.fill_value == expectedData.fill_value) |
                        (numpy.isnan(result.fill_value) & numpy.isnan(expectedData.fill_value))).all(),\
                    "Incorrect output!"
        finally:
            shutil.rmtree(tempdir)
Code Example #37
File: opCachedLabelImage.py Project: burcin/lazyflow
class OpCachedLabelImage(OpCache):
    """
    Combines OpLabelImage with OpCompressedCache, and provides a default block shape.
    """
    Input = InputSlot()
    
    BackgroundLabels = InputSlot(optional=True) # Optional. See OpLabelImage for details.
    BlockShape = InputSlot(optional=True)   # If not provided, blockshape is 1 time slice, 1 channel slice, 
                                            #  and the entire volume in xyz.
    Output = OutputSlot()

    # Serialization support
    InputHdf5 = InputSlot(optional=True)
    CleanBlocks = OutputSlot()
    OutputHdf5 = OutputSlot() # See OpCachedLabelImage for details
    
    # Schematic:
    #
    # BackgroundLabels --     BlockShape --
    #                    \                 \
    # Input ------------> OpLabelImage ---> OpCompressedCache --> Output
    #                                                        \
    #                                                         --> CleanBlocks
    
    def __init__(self, *args, **kwargs):
        warn_deprecated("OpCachedLabelImage is deprecated, use OpLabelVolume instead")
        super(OpCachedLabelImage, self).__init__(*args, **kwargs)
        
        # Hook up the labeler
        self._opLabelImage = OpLabelImage( parent=self )
        self._opLabelImage.Input.connect( self.Input )
        self._opLabelImage.BackgroundLabels.connect( self.BackgroundLabels )

        # Hook up the cache
        self._opCache = OpCompressedCache( parent=self )
        self._opCache.Input.connect( self._opLabelImage.Output )
        self._opCache.InputHdf5.connect( self.InputHdf5 )
        
        # Hook up our output slots
        self.Output.connect( self._opCache.Output )
        self.CleanBlocks.connect( self._opCache.CleanBlocks )
        self.OutputHdf5.connect( self._opCache.OutputHdf5 )
        
    def generateReport(self, report):
        return self._opCache.generateReport(report)
    
    def usedMemory(self):
        return self._opCache.usedMemory()
    
    def fractionOfUsedMemoryDirty(self):
        return self._opCache.fractionOfUsedMemoryDirty()
    
    def lastAccessTime(self):
        return self._opCache.lastAccessTime()
    
    def setupOutputs(self):
        if self.BlockShape.ready():
            self._opCache.BlockShape.setValue( self.BlockShape.value )
        else:
            # By default, block shape is the same as the entire image shape,
            #  but only 1 time slice and 1 channel slice
            taggedBlockShape = self.Input.meta.getTaggedShape()
            taggedBlockShape['t'] = 1
            taggedBlockShape['c'] = 1
            self._opCache.BlockShape.setValue( tuple( taggedBlockShape.values() ) )

    def execute(self, slot, subindex, roi, destination):
        assert False, "Shouldn't get here."
    
    def propagateDirty(self, slot, subindex, roi):
        pass # Nothing to do...

    def setInSlot(self, slot, subindex, roi, value):
        assert slot == self.Input or slot == self.InputHdf5, "Invalid slot for setInSlot(): {}".format( slot.name )
Code Example #38
class OpCachedLabelImage(OpCache):
    """
    Combines OpLabelImage with OpCompressedCache, and provides a default block shape.
    """
    Input = InputSlot()

    BackgroundLabels = InputSlot(
        optional=True)  # Optional. See OpLabelImage for details.
    BlockShape = InputSlot(
        optional=True
    )  # If not provided, blockshape is 1 time slice, 1 channel slice,
    #  and the entire volume in xyz.
    Output = OutputSlot()

    # Serialization support
    InputHdf5 = InputSlot(optional=True)
    CleanBlocks = OutputSlot()
    OutputHdf5 = OutputSlot()  # See OpCachedLabelImage for details

    # Schematic:
    #
    # BackgroundLabels --     BlockShape --
    #                    \                 \
    # Input ------------> OpLabelImage ---> OpCompressedCache --> Output
    #                                                        \
    #                                                         --> CleanBlocks

    def __init__(self, *args, **kwargs):
        warn_deprecated(
            "OpCachedLabelImage is deprecated, use OpLabelVolume instead")
        super(OpCachedLabelImage, self).__init__(*args, **kwargs)

        # Hook up the labeler
        self._opLabelImage = OpLabelImage(parent=self)
        self._opLabelImage.Input.connect(self.Input)
        self._opLabelImage.BackgroundLabels.connect(self.BackgroundLabels)

        # Hook up the cache
        self._opCache = OpCompressedCache(parent=self)
        self._opCache.Input.connect(self._opLabelImage.Output)
        self._opCache.InputHdf5.connect(self.InputHdf5)

        # Hook up our output slots
        self.Output.connect(self._opCache.Output)
        self.CleanBlocks.connect(self._opCache.CleanBlocks)
        self.OutputHdf5.connect(self._opCache.OutputHdf5)

    def generateReport(self, report):
        return self._opCache.generateReport(report)

    def usedMemory(self):
        return self._opCache.usedMemory()

    def fractionOfUsedMemoryDirty(self):
        return self._opCache.fractionOfUsedMemoryDirty()

    def lastAccessTime(self):
        return self._opCache.lastAccessTime()

    def setupOutputs(self):
        if self.BlockShape.ready():
            self._opCache.BlockShape.setValue(self.BlockShape.value)
        else:
            # By default, block shape is the same as the entire image shape,
            #  but only 1 time slice and 1 channel slice
            taggedBlockShape = self.Input.meta.getTaggedShape()
            taggedBlockShape['t'] = 1
            taggedBlockShape['c'] = 1
            self._opCache.BlockShape.setValue(tuple(taggedBlockShape.values()))

    def execute(self, slot, subindex, roi, destination):
        assert False, "Shouldn't get here."

    def propagateDirty(self, slot, subindex, roi):
        pass  # Nothing to do...

    def setInSlot(self, slot, subindex, roi, value):
        assert slot == self.Input or slot == self.InputHdf5, "Invalid slot for setInSlot(): {}".format(
            slot.name)
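
OpCachedLabelImage above simply forwards its slots to an internal OpLabelImage and OpCompressedCache, with a default block shape of one time slice, one channel slice, and the full xyz volume (and it warns that OpLabelVolume should be used instead). The following is a minimal, hedged usage sketch assuming a small binary 'txyzc' volume; the volume shape and variable names are illustrative assumptions, not taken from the source.

# Sketch only: wire a tiny binary 'txyzc' volume through OpCachedLabelImage
# and read back the cached label image.  Shapes and names are assumptions.
import numpy
import vigra
from lazyflow.graph import Graph

graph = Graph()
binary = numpy.random.randint(0, 2, size=(1, 32, 32, 32, 1)).astype(numpy.uint8)
binary = vigra.taggedView(binary, axistags='txyzc')

op = OpCachedLabelImage(graph=graph)
op.Input.setValue(binary)
# BlockShape is optional; if omitted, setupOutputs above picks 1 time slice,
# 1 channel slice, and the entire xyz volume.

labels = op.Output[...].wait()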