Example #1
    def test_multithread(self):
        graph = lazyflow.graph.Graph()
        opCompute = TestOpValueCache.OpSlowComputation(graph=graph)
        opCache = OpValueCache(graph=graph)

        opCompute.Input.setValue(100)
        opCache.Input.connect(opCompute.Output)

        def checkOutput():
            assert opCache.Output.value == 100

        # Access the cached output from 100 threads simultaneously;
        # the cache should compute its input only once.
        threads = []
        for _ in range(100):
            threads.append(threading.Thread(target=checkOutput))

        for t in threads:
            t.start()

        for t in threads:
            t.join()

        # Despite 100 concurrent accesses, the input was computed exactly once,
        # and the cache is now clean with no pending request.
        assert opCompute.executionCount == 1
        assert not opCache._dirty
        assert opCache._request is None
        assert opCache.Output.value == 100
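
This test (and test_cancel below) references a helper operator, TestOpValueCache.OpSlowComputation, whose definition is not shown on this page. A minimal sketch of what such a helper might look like, assuming lazyflow's standard Operator API; the executionCount attribute and the artificial delay are implied by the test's assertions:

    import time
    import threading
    from lazyflow.graph import Operator, InputSlot, OutputSlot

    class OpSlowComputation(Operator):
        # Passes its input through after a delay, counting executions.
        Input = InputSlot()
        Output = OutputSlot()

        def __init__(self, *args, **kwargs):
            super(OpSlowComputation, self).__init__(*args, **kwargs)
            self.executionCount = 0
            self._lock = threading.Lock()

        def setupOutputs(self):
            self.Output.meta.assignFrom(self.Input.meta)

        def execute(self, slot, subindex, roi, result):
            with self._lock:
                self.executionCount += 1
            time.sleep(0.5)  # simulate an expensive computation
            result[...] = self.Input.value
            return result

        def propagateDirty(self, slot, subindex, roi):
            self.Output.setDirty(roi)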
Example #2
    def __init__(self, *args, **kwargs):
        """
        Instantiate the pipeline of internal operators and connect them together.
        
        Most of the operators we use here are designed to handle a single 
        input image and produce a single output image (slot level=0).
        In those cases, we use the OperatorWrapper mechanism to dynamically manage 
        a list of these operators.  (When wrapped, the operators have slots with level=1.)
        
        (In ilastik, we use OpMultiLaneWrapper, which extends OperatorWrapper with extra functionality.)
        """
        super(OpSimplePixelClassification, self).__init__(*args, **kwargs)

        # SUMMARY SCHEMATIC:
        #
        #  ClassifierFactory ---------------------------------
        #                                                     \
        #  Labels ---> Wrapped(OpCompressedUserLabelArray) --> OpTrainClassifierBlocked --> OpValueCache -->
        #             /                                       /                                             \
        #  Features --                                       /                                               Wrapped(OpClassifierPredict) --> Predictions
        #             \                                     /                                               /
        #              Wrapped(OpBlockedArrayCache) --------------------------------------------------------        

        # LABEL CACHE(S)
        # We are really just using this as a cache for label data, which is loaded 'manually' in ingest_labels (below).
        # Therefore, none of these input slots is going to be used, but we need to configure them anyway,
        # or else the operator won't be 'ready'.
        self.opAllLabelCaches = OperatorWrapper( OpCompressedUserLabelArray, parent=self,
                                                 broadcastingSlotNames=['eraser', 'deleteLabel'] )
        self.opAllLabelCaches.Input.connect( self.Labels )
        self.opAllLabelCaches.deleteLabel.setValue( -1 )
        self.opAllLabelCaches.eraser.setValue( 255 )

        # FEATURE CACHE(S)
        self.opAllFeatureCaches = OperatorWrapper( OpBlockedArrayCache, parent=self,
                                                   broadcastingSlotNames=['fixAtCurrent'] )
        self.opAllFeatureCaches.fixAtCurrent.setValue(False) # Do not freeze caches
        self.opAllFeatureCaches.Input.connect( self.Features )

        # TRAINING OPERATOR
        self.opTrain = OpTrainClassifierBlocked( parent=self )
        self.opTrain.ClassifierFactory.connect( self.ClassifierFactory )
        self.opTrain.Labels.connect( self.opAllLabelCaches.Output )
        self.opTrain.Images.connect( self.opAllFeatureCaches.Output )
        self.opTrain.nonzeroLabelBlocks.connect( self.opAllLabelCaches.nonzeroBlocks )

        # CLASSIFIER CACHE
        # This cache stores exactly one object: the classifier itself.
        self.opClassifierCache = OpValueCache(parent=self)
        self.opClassifierCache.Input.connect( self.opTrain.Classifier )
        self.opClassifierCache.fixAtCurrent.setValue(False)

        # PREDICTION OPERATOR(S)
        self.opAllPredictors = OperatorWrapper( OpClassifierPredict, parent=self,
                                                broadcastingSlotNames=['Classifier', 'LabelsCount'] )
        self.opAllPredictors.Classifier.connect( self.opTrain.Classifier )
        self.opAllPredictors.LabelsCount.connect( self.opTrain.MaxLabel )
        self.opAllPredictors.Image.connect( self.opAllFeatureCaches.Output )
        self.Predictions.connect( self.opAllPredictors.PMaps )
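
The OperatorWrapper mechanism described in the docstring can also be demonstrated in isolation. A minimal sketch, assuming lazyflow's standard Operator API; OpDouble is a hypothetical level-0 operator invented for illustration:

    import numpy
    from lazyflow.graph import Graph, Operator, InputSlot, OutputSlot
    from lazyflow.operatorWrapper import OperatorWrapper

    class OpDouble(Operator):
        # A level-0 operator: one input image, one output image.
        Input = InputSlot()
        Output = OutputSlot()

        def setupOutputs(self):
            self.Output.meta.assignFrom(self.Input.meta)

        def execute(self, slot, subindex, roi, result):
            result[...] = 2 * self.Input(roi.start, roi.stop).wait()
            return result

        def propagateDirty(self, slot, subindex, roi):
            self.Output.setDirty(roi)

    graph = Graph()
    opWrapped = OperatorWrapper(OpDouble, graph=graph)

    # The wrapped slots are level-1: resizing Input to two lanes creates
    # two internal OpDouble instances, one per lane.
    opWrapped.Input.resize(2)
    opWrapped.Input[0].setValue(numpy.ones((10, 10)))
    opWrapped.Input[1].setValue(numpy.ones((20, 20)))
    assert opWrapped.Output[0][:].wait().max() == 2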
Example #3
    def test_basic(self):
        graph = lazyflow.graph.Graph()
        op = OpValueCache(graph=graph)
        op.Input.setValue('Hello')
        assert op._dirty
        assert op.Output.value == 'Hello'

        outputDirtyCount = [0]
        def handleOutputDirty(slot, roi):
            outputDirtyCount[0] += 1
        op.Output.notifyDirty(handleOutputDirty)
        
        op.forceValue('Goodbye')
        # The cache itself isn't dirty (won't ask input for value)
        assert not op._dirty
        assert op.Output.value == 'Goodbye'
        
        # But the cache notified downstream slots that its value changed
        assert outputDirtyCount[0] == 1
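
In practice, forceValue is how a precomputed value can be injected into a pipeline without triggering recomputation, e.g. restoring a deserialized classifier into the classifier cache of Example #2. A hypothetical sketch, where op is an OpSimplePixelClassification instance and loaded_classifier is assumed to come from project deserialization:

    # Inject a previously saved classifier into the cache.  Downstream
    # predictors are notified that the value changed, but the cache stays
    # clean and will not re-request training from opTrain.
    op.opClassifierCache.forceValue(loaded_classifier)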
Example #4
    def __init__(self,
                 graph=None,
                 hintOverlayFile=None,
                 pmapOverlayFile=None,
                 parent=None):
        super(OpCarving, self).__init__(graph=graph, parent=parent)
        self.opLabelArray = OpDenseLabelArray(parent=self)
        # self.opLabelArray.EraserLabelValue.setValue( 100 )
        self.opLabelArray.MetaInput.connect(self.InputData)

        self._hintOverlayFile = hintOverlayFile
        self._mst = None
        # Tracks whether any seeds are currently loaded, either drawn
        # by the user or loaded from a saved object.
        self.has_seeds = False

        self.LabelNames.setValue(["Background", "Object"])

        # supervoxels of finished and saved objects
        self._done_seg_lut = None
        self._hints = None
        self._pmap = None
        if hintOverlayFile is not None:
            try:
                with h5py.File(hintOverlayFile, "r") as f:
                    self._hints = f["/hints"][()][numpy.newaxis, :, :, :, numpy.newaxis]
            except Exception:
                logger.info("Could not open hint overlay '%s'" % hintOverlayFile)
                raise

        if pmapOverlayFile is not None:
            try:
                with h5py.File(pmapOverlayFile, "r") as f:
                    self._pmap = f["/data"][()][numpy.newaxis, :, :, :, numpy.newaxis]
            except Exception as e:
                raise RuntimeError("Could not open pmap overlay '%s'" % pmapOverlayFile) from e

        self._setCurrObjectName("<not saved yet>")
        self.HasSegmentation.setValue(False)

        # keep track of a set of object names that have changed since
        # the last serialization of this object to disk
        self._dirtyObjects = set()
        self.preprocessingApplet = None

        self._opMstCache = OpValueCache(parent=self)
        self.MstOut.connect(self._opMstCache.Output)

        self.InputData.notifyReady(self._checkConstraints)
        self.ObjectPrefix.setValue(DEFAULT_LABEL_PREFIX)
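
The [numpy.newaxis, :, :, :, numpy.newaxis] indexing above expands a 3D overlay volume to five dimensions, presumably to match the 5D (t, x, y, z, c) axis convention used elsewhere in the pipeline. A quick illustration of the reshape itself:

    import numpy
    vol = numpy.zeros((64, 64, 64))  # a 3D volume as read from HDF5
    vol5d = vol[numpy.newaxis, :, :, :, numpy.newaxis]
    assert vol5d.shape == (1, 64, 64, 64, 1)  # singleton time and channel axes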
Example #5
    def test_cancel(self):
        """
        This ensures that the Output can be acessed from multiple 
        threads, even if one thread cancels its request.
        The OpValueCache must handle Request.InvalidRequestException errors correctly. 
        """
        n = 20
        graph = lazyflow.graph.Graph()
        opCompute = TestOpValueCache.OpSlowComputation(graph=graph)
        opCache = OpValueCache(graph=graph)

        opCompute.Input.setValue(100)
        opCache.Input.connect(opCompute.Output)

        # Randomly mark ~20% of the requests for cancellation.
        # Re-sample until at least one, but not every, request will be cancelled.
        s = 0
        while s in (0, n):
            should_cancel = numpy.random.random(n) < 0.2
            s = should_cancel.sum()

        def checkOutput(i):
            req = opCache.Output[:]
            req.submit()
            if should_cancel[i]:
                # Cancel the request and mark the data dirty
                # (this forces the next request to restart the computation).
                req.cancel()
                opCache._dirty = True
            else:
                value = req.wait()[0]
                assert value == 100

        # Create n threads, start them, and join them.
        threads = []
        for i in range(n):
            threads.append(threading.Thread(target=partial(checkOutput, i)))
        
        for t in threads:
            t.start()
            
        for t in threads:
            t.join()

        # One final access forces a recompute in case the last thread left
        # the cache dirty; afterwards the cache should be settled.
        opCache.Output[:].wait()
        assert opCache._request is None
        assert opCache.Output.value == 100
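
The cancellation handling that this test exercises can be seen at the Request level. A minimal sketch, assuming lazyflow's Request API; the exception name is taken from the docstring above, and expensive_computation is a hypothetical stand-in for slow work:

    from lazyflow.request import Request

    def expensive_computation():
        return 100  # stand-in for a slow computation (hypothetical)

    req = Request(expensive_computation)
    req.submit()
    req.cancel()

    try:
        req.wait()  # returns normally if the request finished before cancel()
    except Request.InvalidRequestException:
        # Waiting on a cancelled request raises this exception.
        # OpValueCache catches it internally so that the next accessor
        # can restart the computation.
        pass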
Example #6
    def test_cancel(self):
        """
        This ensures that the Output can be acessed from multiple 
        threads, even if one thread cancels its request.
        The OpValueCache must handle Request.InvalidRequestException errors correctly. 
        """
        graph = lazyflow.graph.Graph()
        opCompute = TestOpValueCache.OpSlowComputation(graph=graph)
        opCache = OpValueCache(graph=graph)

        opCompute.Input.setValue(100)
        opCache.Input.connect(opCompute.Output)

        def checkOutput():
            req = opCache.Output[:]
            req.submit()
            if random.random() < 0.2:
                # Cancel the request and mark the data dirty
                # (this forces the next request to restart the computation).
                req.cancel()
                opCache._dirty = True
            else:
                value = req.wait()[0]
                assert value == 100

        # Create 20 threads, start them, and join them.
        threads = []
        for _ in range(20):
            threads.append(threading.Thread(target=checkOutput))
        
        for t in threads:
            t.start()
            
        for t in threads:
            t.join()

        assert opCache.Output.value == 100