Example #1
    def __init__(self, *args, **kwargs):
        super(OpTrainVectorwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        
        self._opFeatureMatrixCaches = OperatorWrapper( OpFeatureMatrixCache, parent=self )
        self._opFeatureMatrixCaches.LabelImage.connect( self.Labels )
        self._opFeatureMatrixCaches.FeatureImage.connect( self.Images )
        
        self._opConcatenateFeatureMatrices = OpConcatenateFeatureMatrices( parent=self )
        self._opConcatenateFeatureMatrices.FeatureMatrices.connect( self._opFeatureMatrixCaches.LabelAndFeatureMatrix )
        self._opConcatenateFeatureMatrices.ProgressSignals.connect( self._opFeatureMatrixCaches.ProgressSignal )
        
        self._opTrainFromFeatures = OpTrainClassifierFromFeatureVectors( parent=self )
        self._opTrainFromFeatures.ClassifierFactory.connect( self.ClassifierFactory )
        self._opTrainFromFeatures.LabelAndFeatureMatrix.connect( self._opConcatenateFeatureMatrices.ConcatenatedOutput )
        self._opTrainFromFeatures.MaxLabel.connect( self.MaxLabel )
        
        self.Classifier.connect( self._opTrainFromFeatures.Classifier )

        # Progress reporting
        def _handleFeatureProgress( progress ):
            # Note that these progress messages will probably appear out-of-order.
            # See comments in OpFeatureMatrixCache
            logger.debug("Training: {:02}% (Computing features)".format(int(progress)))
            self.progressSignal( 0.8*progress )
        self._opConcatenateFeatureMatrices.progressSignal.subscribe( _handleFeatureProgress )
        
        def _handleTrainingComplete():
            logger.debug("Training: 100% (Complete)")
            self.progressSignal( 100.0 )
        self._opTrainFromFeatures.trainingCompleteSignal.subscribe( _handleTrainingComplete )
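Every constructor in these examples follows the same progress-reporting pattern: build an OrderedSignal, subscribe handlers, and emit a percentage. The stand-in below (not lazyflow's actual OrderedSignal implementation; the class name is illustrative) sketches the subscribe/emit contract the code above relies on.

class MiniOrderedSignal:
    """Illustrative stand-in: calls subscribers in the order they subscribed."""

    def __init__(self):
        self._subscribers = []

    def subscribe(self, fn):
        self._subscribers.append(fn)

    def clean(self):
        self._subscribers = []

    def __call__(self, *args, **kwargs):
        for fn in self._subscribers:
            fn(*args, **kwargs)


progress = MiniOrderedSignal()
progress.subscribe(lambda p: print("Training: {:02}%".format(int(p))))
progress(0.8 * 50)   # feature computation is scaled to 80% of the bar
progress(100.0)      # training complete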
Example #2
    def __init__(self, *args, **kwargs):
        super(OpTrainVectorwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        
        self._opFeatureMatrixCaches = OperatorWrapper( OpFeatureMatrixCache, parent=self )
        self._opFeatureMatrixCaches.LabelImage.connect( self.Labels )
        self._opFeatureMatrixCaches.FeatureImage.connect( self.Images )
        self._opFeatureMatrixCaches.NonZeroLabelBlocks.connect( self.nonzeroLabelBlocks )
        
        self._opConcatenateFeatureMatrices = OpConcatenateFeatureMatrices( parent=self )
        self._opConcatenateFeatureMatrices.FeatureMatrices.connect( self._opFeatureMatrixCaches.LabelAndFeatureMatrix )
        self._opConcatenateFeatureMatrices.ProgressSignals.connect( self._opFeatureMatrixCaches.ProgressSignal )
        
        self._opTrainFromFeatures = OpTrainClassifierFromFeatureVectors( parent=self )
        self._opTrainFromFeatures.ClassifierFactory.connect( self.ClassifierFactory )
        self._opTrainFromFeatures.LabelAndFeatureMatrix.connect( self._opConcatenateFeatureMatrices.ConcatenatedOutput )
        self._opTrainFromFeatures.MaxLabel.connect( self.MaxLabel )
        
        self.Classifier.connect( self._opTrainFromFeatures.Classifier )

        # Progress reporting
        def _handleFeatureProgress( progress ):
            self.progressSignal( 0.8*progress )
        self._opConcatenateFeatureMatrices.progressSignal.subscribe( _handleFeatureProgress )
        
        def _handleTrainingComplete():
            self.progressSignal( 100.0 )
        self._opTrainFromFeatures.trainingCompleteSignal.subscribe( _handleTrainingComplete )
Example #3
    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked,
              self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()

        # Normally, lane removal does not trigger a dirty notification.
        # But in this case, if the lane contained any label data whatsoever,
        #  the classifier needs to be marked dirty.
        # We know which slots contain (or contained) label data because they have
        # been 'touched' at some point (they became dirty at some point).
        self._touched_slots = set()

        def handle_new_lane(multislot, index, newlength):
            def handle_dirty_lane(slot, roi):
                self._touched_slots.add(slot)

            multislot[index].notifyDirty(handle_dirty_lane)

        self.Labels.notifyInserted(handle_new_lane)

        def handle_remove_lane(multislot, index, newlength):
            # If the lane we're removing contained
            # label data, then mark the downstream dirty
            if multislot[index] in self._touched_slots:
                self.Classifier.setDirty()
                self._touched_slots.remove(multislot[index])

        self.Labels.notifyRemove(handle_remove_lane)
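The "touched slots" bookkeeping above encodes one invariant: removing a lane should only invalidate the classifier if that lane ever held label data. The lazyflow-free analogy below (lane ids and helper names are hypothetical) walks through the same logic with a plain set.

touched = set()

def on_labels_written(lane_id):      # analogous to the notifyDirty callback
    touched.add(lane_id)

def on_lane_removed(lane_id):        # analogous to handle_remove_lane
    if lane_id in touched:
        touched.discard(lane_id)
        return True                  # classifier must be marked dirty
    return False                     # removing an unlabeled lane is a no-op

on_labels_written("lane-0")
assert on_lane_removed("lane-1") is False   # never labeled: classifier untouched
assert on_lane_removed("lane-0") is True    # had labels: mark downstream dirty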
Example #4
class OpTrainVectorwiseClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    ClassifierFactory = InputSlot()
    MaxLabel = InputSlot()

    Classifier = OutputSlot()

    # Images[N] ---                                                                                         MaxLabel ------
    #              \                                                                                                       \
    # Labels[N] --> opFeatureMatrixCaches ---(FeatureImage[N])---> opConcatenateFeatureImages ---(label+feature matrix)---> OpTrainFromFeatures ---(Classifier)--->

    def __init__(self, *args, **kwargs):
        super(OpTrainVectorwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()

        self._opFeatureMatrixCaches = OperatorWrapper(OpFeatureMatrixCache, parent=self)
        self._opFeatureMatrixCaches.LabelImage.connect(self.Labels)
        self._opFeatureMatrixCaches.FeatureImage.connect(self.Images)

        self._opConcatenateFeatureMatrices = OpConcatenateFeatureMatrices(parent=self)
        self._opConcatenateFeatureMatrices.FeatureMatrices.connect(self._opFeatureMatrixCaches.LabelAndFeatureMatrix)
        self._opConcatenateFeatureMatrices.ProgressSignals.connect(self._opFeatureMatrixCaches.ProgressSignal)

        self._opTrainFromFeatures = OpTrainClassifierFromFeatureVectors(parent=self)
        self._opTrainFromFeatures.ClassifierFactory.connect(self.ClassifierFactory)
        self._opTrainFromFeatures.LabelAndFeatureMatrix.connect(self._opConcatenateFeatureMatrices.ConcatenatedOutput)
        self._opTrainFromFeatures.MaxLabel.connect(self.MaxLabel)

        self.Classifier.connect(self._opTrainFromFeatures.Classifier)

        # Progress reporting
        def _handleFeatureProgress(progress):
            # Note that these progress messages will probably appear out-of-order.
            # See comments in OpFeatureMatrixCache
            logger.debug("Training: {:02}% (Computing features)".format(int(progress)))
            self.progressSignal(0.8 * progress)

        self._opConcatenateFeatureMatrices.progressSignal.subscribe(_handleFeatureProgress)

        def _handleTrainingComplete():
            logger.debug("Training: 100% (Complete)")
            self.progressSignal(100.0)

        self._opTrainFromFeatures.trainingCompleteSignal.subscribe(_handleTrainingComplete)

    def cleanUp(self):
        self.progressSignal.clean()
        self.Classifier.disconnect()
        super(OpTrainVectorwiseClassifierBlocked, self).cleanUp()

    def setupOutputs(self):
        pass  # Nothing to do; our output is connected to an internal operator.

    def execute(self, slot, subindex, roi, result):
        assert False, "Shouldn't get here..."

    def propagateDirty(self, slot, subindex, roi):
        pass
Example #5
class OpTrainVectorwiseClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    ClassifierFactory = InputSlot()
    nonzeroLabelBlocks = InputSlot(level=1) # TODO: Eliminate this slot. It isn't used any more...
    MaxLabel = InputSlot()
    
    Classifier = OutputSlot()
    
    # Images[N] ---                                                                                         MaxLabel ------
    #              \                                                                                                       \
    # Labels[N] --> opFeatureMatrixCaches ---(FeatureImage[N])---> opConcatenateFeatureImages ---(label+feature matrix)---> OpTrainFromFeatures ---(Classifier)--->

    def __init__(self, *args, **kwargs):
        super(OpTrainVectorwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        
        self._opFeatureMatrixCaches = OperatorWrapper( OpFeatureMatrixCache, parent=self )
        self._opFeatureMatrixCaches.LabelImage.connect( self.Labels )
        self._opFeatureMatrixCaches.FeatureImage.connect( self.Images )
        self._opFeatureMatrixCaches.NonZeroLabelBlocks.connect( self.nonzeroLabelBlocks )
        
        self._opConcatenateFeatureMatrices = OpConcatenateFeatureMatrices( parent=self )
        self._opConcatenateFeatureMatrices.FeatureMatrices.connect( self._opFeatureMatrixCaches.LabelAndFeatureMatrix )
        self._opConcatenateFeatureMatrices.ProgressSignals.connect( self._opFeatureMatrixCaches.ProgressSignal )
        
        self._opTrainFromFeatures = OpTrainClassifierFromFeatureVectors( parent=self )
        self._opTrainFromFeatures.ClassifierFactory.connect( self.ClassifierFactory )
        self._opTrainFromFeatures.LabelAndFeatureMatrix.connect( self._opConcatenateFeatureMatrices.ConcatenatedOutput )
        self._opTrainFromFeatures.MaxLabel.connect( self.MaxLabel )
        
        self.Classifier.connect( self._opTrainFromFeatures.Classifier )

        # Progress reporting
        def _handleFeatureProgress( progress ):
            self.progressSignal( 0.8*progress )
        self._opConcatenateFeatureMatrices.progressSignal.subscribe( _handleFeatureProgress )
        
        def _handleTrainingComplete():
            self.progressSignal( 100.0 )
        self._opTrainFromFeatures.trainingCompleteSignal.subscribe( _handleTrainingComplete )

    def cleanUp(self):
        self.progressSignal.clean()
        self.Classifier.disconnect()
        super( OpTrainVectorwiseClassifierBlocked, self ).cleanUp()

    def setupOutputs(self):
        pass # Nothing to do; our output is connected to an internal operator.

    def execute(self, slot, subindex, roi, result):
        assert False, "Shouldn't get here..."

    def propagateDirty(self, slot, subindex, roi):
        pass
Example #6
    def __init__(self, *args, **kwargs):
        super(OpTrainCounter, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self._svr = SVR()
        params = self._svr.get_params()
        self.initInputs(params)
        self.Classifier.meta.dtype = object
        self.Classifier.meta.shape = (self.numRegressors, )

        # Normally, lane removal does not trigger a dirty notification.
        # But in this case, if the lane contained any label data whatsoever,
        #  the classifier needs to be marked dirty.
        # We know which slots contain (or contained) label data because they have
        # been 'touched' at some point (they became dirty at some point).
        self._touched_slots = set()

        def handle_new_lane(multislot, index, newlength):
            def handle_dirty_lane(slot, roi):
                self._touched_slots.add(slot)

            multislot[index].notifyDirty(handle_dirty_lane)

        self.ForegroundLabels.notifyInserted(handle_new_lane)
        self.BackgroundLabels.notifyInserted(handle_new_lane)

        def handle_remove_lane(multislot, index, newlength):
            # If the lane we're removing contained
            # label data, then mark the downstream dirty
            if multislot[index] in self._touched_slots:
                self.Classifier.setDirty()
                self._touched_slots.remove(multislot[index])

        self.ForegroundLabels.notifyRemove(handle_remove_lane)
        self.BackgroundLabels.notifyRemove(handle_remove_lane)
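OpTrainCounter seeds its input slots from the regressor's default hyperparameters via get_params(). Assuming an sklearn-style estimator interface (the SVR used by the counting operator may be a project-specific class, but get_params() behaves the same way), the pattern is:

from sklearn.svm import SVR   # assumption: any estimator exposing get_params() works the same way

svr = SVR()
params = svr.get_params()     # e.g. {'C': 1.0, 'epsilon': 0.1, 'kernel': 'rbf', ...}
for name, value in sorted(params.items()):
    print("{} = {!r}".format(name, value))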
Example #7
    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()

        # Normally, lane removal does not trigger a dirty notification.
        # But in this case, if the lane contained any label data whatsoever,
        #  the classifier needs to be marked dirty.
        # We know which slots contain (or contained) label data because they have
        # been 'touched' at some point (they became dirty at some point).
        self._touched_slots = set()

        def handle_new_lane(multislot, index, newlength):
            def handle_dirty_lane(slot, roi):
                self._touched_slots.add(slot)

            multislot[index].notifyDirty(handle_dirty_lane)

        self.Labels.notifyInserted(handle_new_lane)

        def handle_remove_lane(multislot, index, newlength):
            # If the lane we're removing contained
            # label data, then mark the downstream dirty
            if multislot[index] in self._touched_slots:
                self.Classifier.setDirty()
                self._touched_slots.remove(multislot[index])

        self.Labels.notifyRemove(handle_remove_lane)
Example #8
    def __init__(self, *args, **kwargs):
        super(OpTrainClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self._mode = None

        # Fully connect the vectorwise training operator
        self._opVectorwiseTrain = OpTrainVectorwiseClassifierBlocked(
            parent=self)
        self._opVectorwiseTrain.Images.connect(self.Images)
        self._opVectorwiseTrain.Labels.connect(self.Labels)
        self._opVectorwiseTrain.ClassifierFactory.connect(
            self.ClassifierFactory)
        self._opVectorwiseTrain.nonzeroLabelBlocks.connect(
            self.nonzeroLabelBlocks)
        self._opVectorwiseTrain.MaxLabel.connect(self.MaxLabel)
        self._opVectorwiseTrain.progressSignal.subscribe(self.progressSignal)

        # Fully connect the pixelwise training operator
        self._opPixelwiseTrain = OpTrainPixelwiseClassifierBlocked(parent=self)
        self._opPixelwiseTrain.Images.connect(self.Images)
        self._opPixelwiseTrain.Labels.connect(self.Labels)
        self._opPixelwiseTrain.ClassifierFactory.connect(
            self.ClassifierFactory)
        self._opPixelwiseTrain.nonzeroLabelBlocks.connect(
            self.nonzeroLabelBlocks)
        self._opPixelwiseTrain.MaxLabel.connect(self.MaxLabel)
        self._opPixelwiseTrain.progressSignal.subscribe(self.progressSignal)
Example #9
    def __init__(self, *args, **kwargs):
        super(OpBatchIoSelective, self).__init__(*args, **kwargs)

        self.Dirty.meta.shape = (1, )
        self.Dirty.meta.dtype = bool
        self.OutputDataPath.meta.shape = (1, )
        self.OutputDataPath.meta.dtype = object
        self.ExportResult.meta.shape = (1, )
        self.ExportResult.meta.dtype = object

        # Provide default values
        self.ExportDirectory.setValue('')
        self.Format.setValue(ExportFormat.H5)
        self.Suffix.setValue('_results')
        self.Dirty.setValue(True)

        self.progressSignal = OrderedSignal()
        self.ProgressSignal.setValue(self.progressSignal)

        self._createDirLock = threading.Lock()

        # Cache the input image so we don't request too much at once
        self.ImageCache = OpBlockedArrayCache(parent=self)
        self.ImageCache.fixAtCurrent.setValue(False)
        self.ImageCache.Input.connect(self.ImageToExport)
Example #10
 def __init__(self, *args, **kwargs):
     super(OpTrainCounter, self).__init__(*args, **kwargs)
     self.progressSignal = OrderedSignal()
     self._svr = SVR()
     params = self._svr.get_params()
     self.initInputs(params)
     self.Classifier.meta.dtype = object
     self.Classifier.meta.shape = (self.numRegressors,)
Example #11
    def __init__(
        self,
        h5N5File=None,
        h5N5Path=None,
        Image=None,
        BatchSize: int = None,
        CompressionEnabled: bool = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

        self.progressSignal = OrderedSignal()
        self.d = None
        self.f = None

        self.h5N5File.setOrConnectIfAvailable(h5N5File)
        self.h5N5Path.setOrConnectIfAvailable(h5N5Path)
        self.Image.setOrConnectIfAvailable(Image)
        self.BatchSize.setOrConnectIfAvailable(BatchSize)
        self.CompressionEnabled.setOrConnectIfAvailable(CompressionEnabled)
Example #12
    def __init__(self, *args, **kwargs):
        super(OpTrainSupervoxelClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self._mode = None

        # Fully connect the vectorwise training operator
        self._opVectorwiseTrain = OpTrainSupervoxelwiseClassifierBlocked(parent=self)
        self._opVectorwiseTrain.Images.connect(self.Images)
        self._opVectorwiseTrain.SupervoxelSegmentation.connect(self.SupervoxelSegmentation)
        self._opVectorwiseTrain.SupervoxelFeatures.connect(self.SupervoxelFeatures)
        self._opVectorwiseTrain.SupervoxelLabels.connect(self.SupervoxelLabels)
        self._opVectorwiseTrain.Labels.connect(self.Labels)
        self._opVectorwiseTrain.ClassifierFactory.connect(self.ClassifierFactory)
        self._opVectorwiseTrain.progressSignal.subscribe(self.progressSignal)
Example #13
    def __init__(self, *args, **kwargs):
        super(OpBatchIo, self).__init__(*args, **kwargs)

        self._opExportedImageProvider = None

        self.Dirty.meta.shape = (1, )
        self.Dirty.meta.dtype = bool
        self.OutputDataPath.meta.shape = (1, )
        self.OutputDataPath.meta.dtype = object
        self.ExportResult.meta.shape = (1, )
        self.ExportResult.meta.dtype = object

        # Default to Dirty
        self.Dirty.setValue(True)

        self.progressSignal = OrderedSignal()
        self.ProgressSignal.setValue(self.progressSignal)

        self._createDirLock = threading.Lock()
Example #14
    def __init__(self, *args, **kwargs):
        super(OpBatchIo, self).__init__(*args, **kwargs)

        self.Dirty.meta.shape = (1, )
        self.Dirty.meta.dtype = bool
        self.OutputDataPath.meta.shape = (1, )
        self.OutputDataPath.meta.dtype = object
        self.ExportResult.meta.shape = (1, )
        self.ExportResult.meta.dtype = object

        # Provide default values
        self.ExportDirectory.setValue('')
        self.Format.setValue(ExportFormat.H5)
        self.Suffix.setValue('_results')
        self.Dirty.setValue(True)

        self.progressSignal = OrderedSignal()
        self.ProgressSignal.setValue(self.progressSignal)

        self._createDirLock = threading.Lock()
Example #15
class OpH5WriterBigDataset(Operator):
    name = "H5 File Writer BigDataset"
    category = "Output"

    inputSlots = [InputSlot("hdf5File"), # Must be an already-open hdf5File (or group) for writing to
                  InputSlot("hdf5Path", stype = "string"),
                  InputSlot("Image"),
                  InputSlot("CompressionEnabled", value=True)]

    outputSlots = [OutputSlot("WriteImage")]

    loggingName = __name__ + ".OpH5WriterBigDataset"
    logger = logging.getLogger(loggingName)
    traceLogger = logging.getLogger("TRACE." + loggingName)

    def __init__(self, *args, **kwargs):
        super(OpH5WriterBigDataset, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self.d = None
        self.f = None

    def cleanUp(self):
        super( OpH5WriterBigDataset, self ).cleanUp()
        # Discard the reference to the dataset, to ensure that hdf5 can close the file.
        self.d = None
        self.f = None
        self.progressSignal.clean()

    def setupOutputs(self):
        self.outputs["WriteImage"].meta.shape = (1,)
        self.outputs["WriteImage"].meta.dtype = object

        self.f = self.inputs["hdf5File"].value
        hdf5Path = self.inputs["hdf5Path"].value
        
        # On windows, there may be backslashes.
        hdf5Path = hdf5Path.replace('\\', '/')

        hdf5GroupName, datasetName = os.path.split(hdf5Path)
        if hdf5GroupName == "":
            g = self.f
        else:
            if hdf5GroupName in self.f:
                g = self.f[hdf5GroupName]
            else:
                g = self.f.create_group(hdf5GroupName)

        dataShape=self.Image.meta.shape
        taggedShape = self.Image.meta.getTaggedShape()
        dtype = self.Image.meta.dtype
        if type(dtype) is numpy.dtype:
            # Make sure we're dealing with a type (e.g. numpy.float64),
            #  not a numpy.dtype
            dtype = dtype.type

        numChannels = 1
        if 'c' in taggedShape:
            numChannels = taggedShape['c']

        # Set up our chunk shape: Aim for a cube that's roughly 300k in size
        dtypeBytes = dtype().nbytes
        cubeDim = math.pow( 300000 / (numChannels * dtypeBytes), (1/3.0) )
        cubeDim = int(cubeDim)

        chunkDims = {}
        chunkDims['t'] = 1
        chunkDims['x'] = cubeDim
        chunkDims['y'] = cubeDim
        chunkDims['z'] = cubeDim
        chunkDims['c'] = numChannels
        
        # h5py guide to chunking says chunks of 300k or less "work best"
        assert chunkDims['x'] * chunkDims['y'] * chunkDims['z'] * numChannels * dtypeBytes  <= 300000

        chunkShape = ()
        for i in range( len(dataShape) ):
            axisKey = self.Image.meta.axistags[i].key
            # Chunk shape can't be larger than the data shape
            chunkShape += ( min( chunkDims[axisKey], dataShape[i] ), )

        self.chunkShape = chunkShape
        if datasetName in g.keys():
            del g[datasetName]
        kwargs = { 'shape' : dataShape, 'dtype' : dtype, 'chunks' : self.chunkShape }
        if self.CompressionEnabled.value:
            kwargs['compression'] = 'gzip' # <-- Would be nice to use lzf compression here, but that is h5py-specific.
            kwargs['compression_opts'] = 1 # <-- Optimize for speed, not disk space.
        self.d=g.create_dataset(datasetName, **kwargs)

        if self.Image.meta.drange is not None:
            self.d.attrs['drange'] = self.Image.meta.drange

    def execute(self, slot, subindex, rroi, result):
        self.progressSignal(0)
        
        slicings=self.computeRequestSlicings()
        numSlicings = len(slicings)

        self.logger.debug( "Dividing work into {} pieces".format( len(slicings) ) )

        # Throttle: Only allow 10 outstanding requests at a time.
        # Otherwise, the whole set of requests can be outstanding and use up ridiculous amounts of memory.        
        activeRequests = deque()
        activeSlicings = deque()
        # Start by activating 10 requests 
        for i in range( min(10, len(slicings)) ):
            s = slicings.pop()
            activeSlicings.append(s)
            self.logger.debug( "Creating request for slicing {}".format(s) )
            activeRequests.append( self.inputs["Image"][s] )
        
        counter = 0

        while len(activeRequests) > 0:
            # Wait for a request to finish
            req = activeRequests.popleft()
            s=activeSlicings.popleft()
            data = req.wait()
            if data.flags.c_contiguous:
                self.d.write_direct(data.view(numpy.ndarray), dest_sel=s)
            else:
                self.d[s] = data
            
            req.clean() # Discard the data in the request and allow its children to be garbage collected.

            if len(slicings) > 0:
                # Create a new active request
                s = slicings.pop()
                activeSlicings.append(s)
                activeRequests.append( self.inputs["Image"][s] )
            
            # Since requests finish in an arbitrary order (but we always block for them in the same order),
            # this progress feedback will not be smooth.  It's the best we can do for now.
            self.progressSignal( 100*counter/numSlicings )
            self.logger.debug( "request {} out of {} executed".format( counter, numSlicings ) )
            counter += 1

        # Save the axistags as a dataset attribute
        self.d.attrs['axistags'] = self.Image.meta.axistags.toJSON()

        # Be paranoid: Flush right now.
        self.f.file.flush()

        # We're finished.
        result[0] = True

        self.progressSignal(100)

    def computeRequestSlicings(self):
        #TODO: reimplement the request better
        shape=numpy.asarray(self.inputs['Image'].meta.shape)

        chunkShape = numpy.asarray(self.chunkShape)

        # Choose a request shape that is a multiple of the chunk shape
        axistags = self.Image.meta.axistags
        multipliers = { 'x':5, 'y':5, 'z':5, 't':1, 'c':100 } # For most problems, there is little advantage to breaking up the channels.
        multiplier = [multipliers[tag.key] for tag in axistags ]
        shift = chunkShape * numpy.array(multiplier)
        shift=numpy.minimum(shift,shape)
        start=numpy.asarray([0]*len(shape))

        stop=shift
        reqList=[]

        #shape = shape - (numpy.mod(numpy.asarray(shape),
        #                  shift))

        for indices in product(*[range(0, stop, step)
                        for stop,step in zip(shape, shift)]):

            start=numpy.asarray(indices)
            stop=numpy.minimum(start+shift,shape)
            reqList.append(roiToSlice(start,stop))
        return reqList

    def propagateDirty(self, slot, subindex, roi):
        # The output from this operator isn't generally connected to other operators.
        # If someone is using it that way, we'll assume that the user wants to know that 
        #  the input image has become dirty and may need to be written to disk again.
        self.WriteImage.setDirty(slice(None))
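The chunk-shape logic in setupOutputs() above solves numChannels * dtypeBytes * cubeDim**3 <= 300000 for cubeDim, then clips each axis to the data shape. The standalone sketch below reproduces that arithmetic for an assumed uint8, 3-channel volume in txyzc order (the shape is made up for illustration).

import math
import numpy

tagged_shape = {'t': 1, 'x': 1024, 'y': 1024, 'z': 512, 'c': 3}   # illustrative volume
dtype_bytes = numpy.uint8().nbytes
num_channels = tagged_shape['c']

# Same arithmetic as above: one chunk is a cubeDim**3 cube per channel, capped at ~300 kB.
cube_dim = int(math.pow(300000 / (num_channels * dtype_bytes), 1 / 3.0))
chunk_dims = {'t': 1, 'x': cube_dim, 'y': cube_dim, 'z': cube_dim, 'c': num_channels}

# Chunk shape can't be larger than the data shape along any axis.
chunk_shape = tuple(min(chunk_dims[axis], extent) for axis, extent in tagged_shape.items())
assert cube_dim ** 3 * num_channels * dtype_bytes <= 300000
print(chunk_shape)   # (1, 46, 46, 46, 3) for this dtype and channel count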
Example #16
class OpTrainPixelwiseClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    ClassifierFactory = InputSlot()
    nonzeroLabelBlocks = InputSlot(level=1)
    MaxLabel = InputSlot()
    
    Classifier = OutputSlot()

    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
    
    def setupOutputs(self):
        for slot in list(self.Images) + list(self.Labels):
            assert slot.meta.getAxisKeys()[-1] == 'c', \
                "This opearator assumes channel is the last axis."
        
        self.Classifier.meta.dtype = object
        self.Classifier.meta.shape = (1,)

        # Special metadata for downstream operators using the classifier
        self.Classifier.meta.classifier_factory = self.ClassifierFactory.value

    def cleanUp(self):
        self.progressSignal.clean()
        super( OpTrainPixelwiseClassifierBlocked, self ).cleanUp()

    def execute(self, slot, subindex, roi, result):
        classifier_factory = self.ClassifierFactory.value
        assert issubclass(type(classifier_factory), LazyflowPixelwiseClassifierFactoryABC), \
            "Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
            "".format( type(classifier_factory) )
        
        # Accumulate all non-zero blocks of each image into lists
        label_data_blocks = []
        image_data_blocks = []
        for image_slot, label_slot, nonzero_block_slot in zip(self.Images, self.Labels, self.nonzeroLabelBlocks):
            block_slicings = nonzero_block_slot.value
            for block_slicing in block_slicings:
                block_label_roi = sliceToRoi( block_slicing, image_slot.meta.shape )

                # Ask for the halo needed by the classifier
                axiskeys = image_slot.meta.getAxisKeys()
                halo_shape = classifier_factory.get_halo_shape(axiskeys)
                assert len(halo_shape) == len( block_label_roi[0] )
                assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

                # Expand block by halo, then clip to image bounds
                block_label_roi = numpy.array( block_label_roi )
                block_label_roi[0] -= halo_shape
                block_label_roi[1] += halo_shape
                block_label_roi = getIntersection( block_label_roi, roiFromShape(image_slot.meta.shape) )

                block_image_roi = numpy.array( block_label_roi )
                assert (block_image_roi[:, -1] == [0,1]).all()
                num_channels = image_slot.meta.shape[-1]
                block_image_roi[:, -1] = [0, num_channels]

                # Ensure the results are plain ndarray, not VigraArray, 
                #  which some classifiers might have trouble with.
                block_label_data = numpy.asarray( label_slot(*block_label_roi).wait() )
                block_image_data = numpy.asarray( image_slot(*block_image_roi).wait() )
                
                label_data_blocks.append( block_label_data )
                image_data_blocks.append( block_image_data )
                
        logger.debug("Training new classifier: {}".format( classifier_factory.description ))
        classifier = classifier_factory.create_and_train_pixelwise( image_data_blocks, label_data_blocks )
        assert issubclass(type(classifier), LazyflowPixelwiseClassifierABC), \
            "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
            "".format( type(classifier) )
        result[0] = classifier
        return result

    def propagateDirty(self, slot, subindex, roi):
        self.Classifier.setDirty()
Example #17
class OpTrainPixelwiseClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    ClassifierFactory = InputSlot()
    nonzeroLabelBlocks = InputSlot(level=1)
    MaxLabel = InputSlot()
    
    Classifier = OutputSlot()

    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()

        # Normally, lane removal does not trigger a dirty notification.
        # But in this case, if the lane contained any label data whatsoever,
        #  the classifier needs to be marked dirty.
        # We know which slots contain (or contained) label data because they have
        # been 'touched' at some point (they became dirty at some point).
        self._touched_slots = set()
        def handle_new_lane( multislot, index, newlength ):
            def handle_dirty_lane( slot, roi ):
                self._touched_slots.add(slot)
            multislot[index].notifyDirty( handle_dirty_lane )
        self.Labels.notifyInserted( handle_new_lane )

        def handle_remove_lane( multislot, index, newlength ):
            # If the lane we're removing contained
            # label data, then mark the downstream dirty
            if multislot[index] in self._touched_slots:
                self.Classifier.setDirty()
                self._touched_slots.remove(multislot[index])
        self.Labels.notifyRemove( handle_remove_lane )
    
    def setupOutputs(self):
        for slot in list(self.Images) + list(self.Labels):
            assert slot.meta.getAxisKeys()[-1] == 'c', \
                "This opearator assumes channel is the last axis."
        
        self.Classifier.meta.dtype = object
        self.Classifier.meta.shape = (1,)

        # Special metadata for downstream operators using the classifier
        self.Classifier.meta.classifier_factory = self.ClassifierFactory.value

    def cleanUp(self):
        self.progressSignal.clean()
        super( OpTrainPixelwiseClassifierBlocked, self ).cleanUp()

    def execute(self, slot, subindex, roi, result):
        classifier_factory = self.ClassifierFactory.value
        assert issubclass(type(classifier_factory), LazyflowPixelwiseClassifierFactoryABC), \
            "Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
            "".format( type(classifier_factory) )
        
        # Accumulate all non-zero blocks of each image into lists
        label_data_blocks = []
        image_data_blocks = []
        for image_slot, label_slot, nonzero_block_slot in zip(self.Images, self.Labels, self.nonzeroLabelBlocks):
            block_slicings = nonzero_block_slot.value
            for block_slicing in block_slicings:
                # Get labels
                block_label_roi = sliceToRoi( block_slicing, label_slot.meta.shape )
                block_label_data = label_slot(*block_label_roi).wait()
                
                # Shrink roi to bounding box of actual label pixels
                bb_roi_within_block = nonzero_bounding_box(block_label_data)
                block_label_bb_roi = bb_roi_within_block + block_label_roi[0]

                # Double-check that there is at least 1 non-zero label in the block.
                if (block_label_bb_roi[1] > block_label_bb_roi[0]).all():
                    # Ask for the halo needed by the classifier
                    axiskeys = image_slot.meta.getAxisKeys()
                    halo_shape = classifier_factory.get_halo_shape(axiskeys)
                    assert len(halo_shape) == len( block_label_roi[0] )
                    assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."
    
                    # Expand block by halo, but keep clipped to image bounds
                    padded_label_roi, bb_roi_within_padded = enlargeRoiForHalo( *block_label_bb_roi, 
                                                                                shape=label_slot.meta.shape,
                                                                                sigma=halo_shape,
                                                                                window=1,
                                                                                return_result_roi=True )
                    
                    # Copy labels to new array, which has size == bounding-box + halo
                    padded_label_data = numpy.zeros( padded_label_roi[1] - padded_label_roi[0], label_slot.meta.dtype )                
                    padded_label_data[roiToSlice(*bb_roi_within_padded)] = block_label_data[roiToSlice(*bb_roi_within_block)]
    
                    padded_image_roi = numpy.array( padded_label_roi )
                    assert (padded_image_roi[:, -1] == [0,1]).all()
                    num_channels = image_slot.meta.shape[-1]
                    padded_image_roi[:, -1] = [0, num_channels]
    
                    # Ensure the results are plain ndarray, not VigraArray, 
                    #  which some classifiers might have trouble with.
                    padded_image_data = numpy.asarray( image_slot(*padded_image_roi).wait() )
                    
                    label_data_blocks.append( padded_label_data )
                    image_data_blocks.append( padded_image_data )

        if len(image_data_blocks) == 0:
            result[0] = None
        else:
            channel_names = self.Images[0].meta.channel_names
            axistags = self.Images[0].meta.axistags
            logger.debug("Training new pixelwise classifier: {}".format( classifier_factory.description ))
            classifier = classifier_factory.create_and_train_pixelwise( image_data_blocks, label_data_blocks, axistags, channel_names )
            result[0] = classifier
            if classifier is not None:
                assert issubclass(type(classifier), LazyflowPixelwiseClassifierABC), \
                    "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
                    "".format( type(classifier) )

    def propagateDirty(self, slot, subindex, roi):
        self.Classifier.setDirty()
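The block handling above shrinks each labeled block to the bounding box of its non-zero labels, expands it by the classifier's halo, and clips the result to the image bounds. The numpy-only sketch below walks through that ROI arithmetic on a small 2D example; the array contents and halo are made up for illustration.

import numpy

labels = numpy.zeros((10, 10), dtype=numpy.uint8)
labels[4:6, 3:7] = 1                      # a small patch of label data

# Bounding box of the non-zero labels, as a (start, stop) ROI pair.
nz = numpy.nonzero(labels)
bb_roi = numpy.array([[int(a.min()) for a in nz],
                      [int(a.max()) + 1 for a in nz]])      # [[4, 3], [6, 7]]

# Expand by the classifier's halo, then clip to the image bounds
# (the same effect enlargeRoiForHalo() achieves above).
halo = numpy.array([3, 3])
padded_roi = numpy.array([bb_roi[0] - halo, bb_roi[1] + halo])
padded_roi[0] = numpy.maximum(padded_roi[0], 0)
padded_roi[1] = numpy.minimum(padded_roi[1], labels.shape)

print(bb_roi.tolist())      # [[4, 3], [6, 7]]
print(padded_roi.tolist())  # [[1, 0], [9, 10]]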
Example #18
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self.progressSignal = OrderedSignal()
     self.opStackLoader = OpStackLoader(parent=self)
     self.opStackLoader.globstring.connect(self.GlobString)
Example #19
 def __init__(self, *args, **kwargs):
     super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
     self.progressSignal = OrderedSignal()
Example #20
 def __init__(self, *args, **kwargs):
     super(OpTrainClassifierBlocked, self).__init__(*args, **kwargs)
     self.progressSignal = OrderedSignal()
     self._mode = None
     self._training_op = None
Example #21
class OpH5WriterBigDataset(Operator):
    name = "H5 File Writer BigDataset"
    category = "Output"

    inputSlots = [
        InputSlot(
            "hdf5File"
        ),  # Must be an already-open hdf5File (or group) for writing to
        InputSlot("hdf5Path", stype="string"),
        InputSlot("Image"),
        InputSlot("CompressionEnabled", value=True),
        InputSlot("BatchSize", optional=True)
    ]

    outputSlots = [OutputSlot("WriteImage")]

    loggingName = __name__ + ".OpH5WriterBigDataset"
    logger = logging.getLogger(loggingName)
    traceLogger = logging.getLogger("TRACE." + loggingName)

    def __init__(self, *args, **kwargs):
        super(OpH5WriterBigDataset, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self.d = None
        self.f = None

    def cleanUp(self):
        super(OpH5WriterBigDataset, self).cleanUp()
        # Discard the reference to the dataset, to ensure that hdf5 can close the file.
        self.d = None
        self.f = None
        self.progressSignal.clean()

    def setupOutputs(self):
        self.outputs["WriteImage"].meta.shape = (1, )
        self.outputs["WriteImage"].meta.dtype = object

        self.f = self.inputs["hdf5File"].value
        hdf5Path = self.inputs["hdf5Path"].value

        # On windows, there may be backslashes.
        hdf5Path = hdf5Path.replace('\\', '/')

        hdf5GroupName, datasetName = os.path.split(hdf5Path)
        if hdf5GroupName == "":
            g = self.f
        else:
            if hdf5GroupName in self.f:
                g = self.f[hdf5GroupName]
            else:
                g = self.f.create_group(hdf5GroupName)

        dataShape = self.Image.meta.shape
        self.logger.info("Data shape: {}".format(dataShape))

        dtype = self.Image.meta.dtype
        if type(dtype) is numpy.dtype:
            # Make sure we're dealing with a type (e.g. numpy.float64),
            #  not a numpy.dtype
            dtype = dtype.type
        # Set up our chunk shape: Aim for a cube that's roughly 512k in size
        dtypeBytes = dtype().nbytes

        tagged_maxshape = self.Image.meta.getTaggedShape()
        if 't' in tagged_maxshape:
            # Assume that chunks should not span multiple t-slices,
            #  and channels are often handled separately, too.
            tagged_maxshape['t'] = 1

        if 'c' in tagged_maxshape:
            tagged_maxshape['c'] = 1

        self.chunkShape = determineBlockShape(tagged_maxshape.values(),
                                              512000.0 / dtypeBytes)

        if datasetName in g.keys():
            del g[datasetName]
        kwargs = {
            'shape': dataShape,
            'dtype': dtype,
            'chunks': self.chunkShape
        }
        if self.CompressionEnabled.value:
            kwargs['compression'] = 'gzip'  # <-- Would be nice to use lzf compression here, but that is h5py-specific.
            kwargs['compression_opts'] = 1  # <-- Optimize for speed, not disk space.
        self.d = g.create_dataset(datasetName, **kwargs)

        if self.Image.meta.drange is not None:
            self.d.attrs['drange'] = self.Image.meta.drange
        if self.Image.meta.display_mode is not None:
            self.d.attrs['display_mode'] = self.Image.meta.display_mode

    def execute(self, slot, subindex, rroi, result):
        self.progressSignal(0)

        # Save the axistags as a dataset attribute
        self.d.attrs['axistags'] = self.Image.meta.axistags.toJSON()

        def handle_block_result(roi, data):
            slicing = roiToSlice(*roi)
            if data.flags.c_contiguous:
                self.d.write_direct(data.view(numpy.ndarray), dest_sel=slicing)
            else:
                self.d[slicing] = data

        batch_size = None
        if self.BatchSize.ready():
            batch_size = self.BatchSize.value
        requester = BigRequestStreamer(self.Image,
                                       roiFromShape(self.Image.meta.shape),
                                       batchSize=batch_size)
        requester.resultSignal.subscribe(handle_block_result)
        requester.progressSignal.subscribe(self.progressSignal)
        requester.execute()

        # Be paranoid: Flush right now.
        self.f.file.flush()

        # We're finished.
        result[0] = True

        self.progressSignal(100)

    def propagateDirty(self, slot, subindex, roi):
        # The output from this operator isn't generally connected to other operators.
        # If someone is using it that way, we'll assume that the user wants to know that
        #  the input image has become dirty and may need to be written to disk again.
        self.WriteImage.setDirty(slice(None))
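The kwargs assembled above map directly onto h5py's create_dataset(): a fixed chunk shape plus gzip level 1, favoring speed over disk space. A minimal standalone sketch (the file name and array contents are illustrative) is shown below.

import h5py
import numpy

data = numpy.random.randint(0, 255, size=(64, 64, 64), dtype=numpy.uint8)

with h5py.File("example_export.h5", "w") as f:   # illustrative file name
    d = f.create_dataset(
        "exported_image",
        shape=data.shape,
        dtype=data.dtype,
        chunks=(32, 32, 32),      # fixed chunk shape, analogous to self.chunkShape above
        compression="gzip",       # lzf would be faster, but it is h5py-specific
        compression_opts=1,       # gzip level 1: optimize for speed, not disk space
    )
    d[...] = data
    d.attrs["drange"] = (0, 255)  # same style of dataset attribute as above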
Example #22
 def __init__(self, *args, **kwargs):
     super(OpH5WriterBigDataset, self).__init__(*args, **kwargs)
     self.progressSignal = OrderedSignal()
     self.d = None
     self.f = None
Example #23
class OpTikTorchTrainClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    nonzeroLabelBlocks = InputSlot(level=1)  # Used only in the pixelwise case.
    MaxLabel = InputSlot()
    ModelSession = InputSlot()
    BlockShape = InputSlot(level=1)

    UpdatedModelSession = OutputSlot()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()

    def setupOutputs(self):
        for slot in [self.Images, self.Labels]:
            assert all(
                [s.meta.getAxisKeys()[-1] == "c" for s in slot]
            ), f"This opearator assumes channel is the last axis. problem: {slot}"

        self.ModelSession.meta.dtype = object
        self.ModelSession.meta.shape = (1, )

    def cleanUp(self):
        self.progressSignal.clean()
        super().cleanUp()

    def _collect_blocks(self, image_slot, label_slot, block_slicings):
        model = self.ModelSession.value
        image_data_blocks = []
        label_data_blocks = []
        block_ids = []
        for block_slicing in block_slicings:
            # Get labels
            block_label_roi = sliceToRoi(block_slicing, label_slot.meta.shape)
            block_label_data = label_slot(*block_label_roi).wait()

            bb_roi_within_block = numpy.array(
                [[0] * len(block_label_data.shape), list(block_label_data.shape)])
            block_label_bb_roi = bb_roi_within_block + block_label_roi[0]

            # Ask for the halo needed by the classifier
            axiskeys = image_slot.meta.getAxisKeys()
            halo_shape = model.get_halo(axiskeys)
            assert len(halo_shape) == len(block_label_roi[0])
            assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

            # Expand block by halo, but keep clipped to image bounds
            padded_label_roi, bb_roi_within_padded = enlargeRoiForHalo(
                *block_label_bb_roi,
                shape=label_slot.meta.shape,
                sigma=halo_shape,
                window=1,
                return_result_roi=True)

            # Copy labels to new array, which has size == bounding-box + halo
            padded_label_data = numpy.zeros(
                padded_label_roi[1] - padded_label_roi[0], label_slot.meta.dtype)
            padded_label_data[roiToSlice(*bb_roi_within_padded)] = \
                block_label_data[roiToSlice(*bb_roi_within_block)]

            padded_image_roi = numpy.array(padded_label_roi)
            assert (padded_image_roi[:, -1] == [0, 1]).all()
            num_channels = image_slot.meta.shape[-1]
            padded_image_roi[:, -1] = [0, num_channels]

            # Ensure the results are plain ndarray, not VigraArray,
            #  which some classifiers might have trouble with.
            padded_image_data = numpy.asarray(
                image_slot(*padded_image_roi).wait())

            image_data_blocks.append(padded_image_data)
            label_data_blocks.append(padded_label_data)
            block_ids.append(
                tuple(
                    int(block_label_bb_roi[0][i])
                    for i, key in enumerate(axiskeys) if key != "c"))

        return image_data_blocks, label_data_blocks, block_ids

    def execute(self, slot, subindex, roi, result):
        model_session = self.ModelSession.value
        result[0] = model_session
        return result

    def propagateDirty(self, slot, subindex, roi):
        if slot == self.Labels:
            if not self.ModelSession.ready():
                return

            try:
                model_session = self.ModelSession.value
                image_slot = self.Images[subindex]
                label_slot = self.Labels[subindex]
                # todo: get block shape in a less hacky way
                block_shape = self.BlockShape[subindex].value
                block_starts = getIntersectingBlocks(block_shape,
                                                     (roi.start, roi.stop))
                label_shape = label_slot.meta.shape
                axis_keys = label_slot.meta.getAxisKeys()
                block_slicings = [
                    [
                        slice(None) if axis == "c"
                        else slice(dmax - dblock, dmax) if dstart + dblock > dmax
                        else slice(dstart, dstart + dblock)
                        for dstart, dblock, dmax, axis in zip(block_start, block_shape, label_shape, axis_keys)
                    ]
                    for block_start in block_starts
                ]

                image_blocks, label_blocks, block_ids = self._collect_blocks(
                    image_slot, label_slot, block_slicings)
                axistags = self.Images[0].meta.axistags

                model_session.update(image_blocks, label_blocks, axistags,
                                     block_ids)
            except Exception as e:
                logger.error(e, exc_info=True)
        else:
            self.UpdatedModelSession.setDirty()
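
In propagateDirty above, each dirty label region is tiled into training blocks, and every block is clamped to the label volume: the channel axis is taken whole, and a block that would overrun an axis is slid back so that it ends at the image border. A standalone sketch of that per-axis rule (the helper name is made up; plain Python only):

def clamp_block_slicing(block_start, block_shape, image_shape, axis_keys):
    # Reproduce the per-axis rule from the list comprehension above:
    #  - channel axis: take everything
    #  - block would overrun the axis: slide it back to end at the border
    #  - otherwise: an ordinary [start, start + block) slice
    slicing = []
    for dstart, dblock, dmax, axis in zip(block_start, block_shape, image_shape, axis_keys):
        if axis == "c":
            slicing.append(slice(None))
        elif dstart + dblock > dmax:
            slicing.append(slice(dmax - dblock, dmax))
        else:
            slicing.append(slice(dstart, dstart + dblock))
    return slicing

# A 64-voxel block starting at x=200 in a 230-voxel axis becomes slice(166, 230).
print(clamp_block_slicing((200, 0), (64, 1), (230, 3), "xc"))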
Example No. 28
0
    def __init__(self, *args, **kwargs):
        super(OpH5WriterBigDataset, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self.d = None
        self.f = None
Example No. 29
0
class OpTrainPixelwiseClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    ClassifierFactory = InputSlot()
    nonzeroLabelBlocks = InputSlot(level=1)
    MaxLabel = InputSlot()
    
    Classifier = OutputSlot()

    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
    
    def setupOutputs(self):
        self.Classifier.meta.dtype = object
        self.Classifier.meta.shape = (1,)

        # Special metadata for downstream operators using the classifier
        self.Classifier.meta.classifier_factory = self.ClassifierFactory.value

    def cleanUp(self):
        self.progressSignal.clean()
        super( OpTrainPixelwiseClassifierBlocked, self ).cleanUp()

    def execute(self, slot, subindex, roi, result):
        classifier_factory = self.ClassifierFactory.value
        assert isinstance(classifier_factory, LazyflowPixelwiseClassifierFactoryABC), \
            "Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
            "".format( type(classifier_factory) )
        
        # Accumulate all non-zero blocks of each image into lists
        label_data_blocks = []
        image_data_blocks = []
        for image_slot, label_slot, nonzero_block_slot in zip(self.Images, self.Labels, self.nonzeroLabelBlocks):
            block_slicings = nonzero_block_slot.value
            for block_slicing in block_slicings:
                block_label_roi = sliceToRoi( block_slicing, image_slot.meta.shape )
                block_image_roi = numpy.array( block_label_roi )
                assert (block_image_roi[:, -1] == [0,1]).all()
                num_channels = image_slot.meta.shape[-1]
                block_image_roi[:, -1] = [0, num_channels]

                # TODO: Compensate for the halo as specified by the classifier...
                #axiskeys = image_slot.meta.getAxisKeys()
                #halo_shape = classifier_factory.get_halo_shape(axiskeys)

                # Ensure the results are plain ndarray, not VigraArray, 
                #  which some classifiers might have trouble with.
                block_label_data = numpy.asarray( label_slot(*block_label_roi).wait() )
                block_image_data = numpy.asarray( image_slot(*block_image_roi).wait() )
                
                label_data_blocks.append( block_label_data )
                image_data_blocks.append( block_image_data )
                
        classifier = classifier_factory.create_and_train_pixelwise( image_data_blocks, label_data_blocks )
        assert isinstance(classifier, LazyflowPixelwiseClassifierABC), \
            "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
            "".format( type(classifier) )
        result[0] = classifier
        return result

    def propagateDirty(self, slot, subindex, roi):
        print('classifier is dirty...')
        self.Classifier.setDirty()
Example No. 30
0
class OpTrainPixelwiseClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    ClassifierFactory = InputSlot()
    nonzeroLabelBlocks = InputSlot(level=1)
    MaxLabel = InputSlot()

    Classifier = OutputSlot()

    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked,
              self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()

        # Normally, lane removal does not trigger a dirty notification.
        # But in this case, if the lane contained any label data whatsoever,
        #  the classifier needs to be marked dirty.
        # We know which slots contain (or contained) label data because they have
        # been 'touched' at some point (they became dirty at some point).
        self._touched_slots = set()

        def handle_new_lane(multislot, index, newlength):
            def handle_dirty_lane(slot, roi):
                self._touched_slots.add(slot)

            multislot[index].notifyDirty(handle_dirty_lane)

        self.Labels.notifyInserted(handle_new_lane)

        def handle_remove_lane(multislot, index, newlength):
            # If the lane we're removing contained
            # label data, then mark the downstream dirty
            if multislot[index] in self._touched_slots:
                self.Classifier.setDirty()
                self._touched_slots.remove(multislot[index])

        self.Labels.notifyRemove(handle_remove_lane)

    def setupOutputs(self):
        for slot in list(self.Images) + list(self.Labels):
            assert slot.meta.getAxisKeys()[-1] == 'c', \
                "This operator assumes channel is the last axis."

        self.Classifier.meta.dtype = object
        self.Classifier.meta.shape = (1, )

        # Special metadata for downstream operators using the classifier
        self.Classifier.meta.classifier_factory = self.ClassifierFactory.value

    def cleanUp(self):
        self.progressSignal.clean()
        super(OpTrainPixelwiseClassifierBlocked, self).cleanUp()

    def execute(self, slot, subindex, roi, result):
        classifier_factory = self.ClassifierFactory.value
        assert issubclass(type(classifier_factory), LazyflowPixelwiseClassifierFactoryABC), \
            "Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
            "".format( type(classifier_factory) )

        # Accumulate all non-zero blocks of each image into lists
        label_data_blocks = []
        image_data_blocks = []
        for image_slot, label_slot, nonzero_block_slot in zip(
                self.Images, self.Labels, self.nonzeroLabelBlocks):
            block_slicings = nonzero_block_slot.value
            for block_slicing in block_slicings:
                # Get labels
                block_label_roi = sliceToRoi(block_slicing,
                                             label_slot.meta.shape)
                block_label_data = label_slot(*block_label_roi).wait()

                # Shrink roi to bounding box of actual label pixels
                bb_roi_within_block = nonzero_bounding_box(block_label_data)
                block_label_bb_roi = bb_roi_within_block + block_label_roi[0]

                # Double-check that there is at least 1 non-zero label in the block.
                if (block_label_bb_roi[1] > block_label_bb_roi[0]).all():
                    # Ask for the halo needed by the classifier
                    axiskeys = image_slot.meta.getAxisKeys()
                    halo_shape = classifier_factory.get_halo_shape(axiskeys)
                    assert len(halo_shape) == len(block_label_roi[0])
                    assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

                    # Expand block by halo, but keep clipped to image bounds
                    padded_label_roi, bb_roi_within_padded = enlargeRoiForHalo(
                        *block_label_bb_roi,
                        shape=label_slot.meta.shape,
                        sigma=halo_shape,
                        window=1,
                        return_result_roi=True)

                    # Copy labels to new array, which has size == bounding-box + halo
                    padded_label_data = numpy.zeros(
                        padded_label_roi[1] - padded_label_roi[0], label_slot.meta.dtype)
                    padded_label_data[roiToSlice(*bb_roi_within_padded)] = \
                        block_label_data[roiToSlice(*bb_roi_within_block)]

                    padded_image_roi = numpy.array(padded_label_roi)
                    assert (padded_image_roi[:, -1] == [0, 1]).all()
                    num_channels = image_slot.meta.shape[-1]
                    padded_image_roi[:, -1] = [0, num_channels]

                    # Ensure the results are plain ndarray, not VigraArray,
                    #  which some classifiers might have trouble with.
                    padded_image_data = numpy.asarray(
                        image_slot(*padded_image_roi).wait())

                    label_data_blocks.append(padded_label_data)
                    image_data_blocks.append(padded_image_data)

        if len(image_data_blocks) == 0:
            result[0] = None
        else:
            channel_names = self.Images[0].meta.channel_names
            axistags = self.Images[0].meta.axistags
            logger.debug("Training new pixelwise classifier: {}".format(
                classifier_factory.description))
            classifier = classifier_factory.create_and_train_pixelwise(
                image_data_blocks, label_data_blocks, axistags, channel_names)
            result[0] = classifier
            if classifier is not None:
                assert issubclass(type(classifier), LazyflowPixelwiseClassifierABC), \
                    "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
                    "".format( type(classifier) )

    def propagateDirty(self, slot, subindex, roi):
        self.Classifier.setDirty()
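
The execute method above shrinks each labelled block to the bounding box of its non-zero labels, grows that box by the classifier halo (clipped to the image), and copies the labels into a zero-filled array of the padded size. A pure-numpy 1D sketch of that copy step, with hand-picked ROIs standing in for nonzero_bounding_box and enlargeRoiForHalo:

import numpy

labels = numpy.zeros(20, dtype=numpy.uint8)
labels[8:12] = 1                                   # non-zero labels live in [8, 12)

bb_roi = numpy.array([[8], [12]])                  # bounding box of the labels
halo = 3
padded_roi = numpy.array([[max(0, 8 - halo)],      # bounding box +/- halo,
                          [min(20, 12 + halo)]])   #  clipped to the volume

# Copy the labelled voxels into a zero array of the padded size,
# at their offset relative to the padded ROI.
padded = numpy.zeros(int(padded_roi[1, 0] - padded_roi[0, 0]), dtype=labels.dtype)
offset = int(bb_roi[0, 0] - padded_roi[0, 0])
padded[offset:offset + 4] = labels[8:12]

print(padded)  # zeros in the halo, the original labels in the middle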
Example No. 31
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self.d = None
        self.f = None
Example No. 32
0
class OpTrainPixelwiseClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    ClassifierFactory = InputSlot()
    nonzeroLabelBlocks = InputSlot(level=1)
    MaxLabel = InputSlot()

    Classifier = OutputSlot()

    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked,
              self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()

    def setupOutputs(self):
        self.Classifier.meta.dtype = object
        self.Classifier.meta.shape = (1, )

        # Special metadata for downstream operators using the classifier
        self.Classifier.meta.classifier_factory = self.ClassifierFactory.value

    def cleanUp(self):
        self.progressSignal.clean()
        super(OpTrainPixelwiseClassifierBlocked, self).cleanUp()

    def execute(self, slot, subindex, roi, result):
        classifier_factory = self.ClassifierFactory.value
        assert isinstance(classifier_factory, LazyflowPixelwiseClassifierFactoryABC), \
            "Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
            "".format( type(classifier_factory) )

        # Accumulate all non-zero blocks of each image into lists
        label_data_blocks = []
        image_data_blocks = []
        for image_slot, label_slot, nonzero_block_slot in zip(
                self.Images, self.Labels, self.nonzeroLabelBlocks):
            block_slicings = nonzero_block_slot.value
            for block_slicing in block_slicings:
                block_label_roi = sliceToRoi(block_slicing,
                                             image_slot.meta.shape)
                block_image_roi = numpy.array(block_label_roi)
                assert (block_image_roi[:, -1] == [0, 1]).all()
                num_channels = image_slot.meta.shape[-1]
                block_image_roi[:, -1] = [0, num_channels]

                # TODO: Compensate for the halo as specified by the classifier...
                #axiskeys = image_slot.meta.getAxisKeys()
                #halo_shape = classifier_factory.get_halo_shape(axiskeys)

                # Ensure the results are plain ndarray, not VigraArray,
                #  which some classifiers might have trouble with.
                block_label_data = numpy.asarray(
                    label_slot(*block_label_roi).wait())
                block_image_data = numpy.asarray(
                    image_slot(*block_image_roi).wait())

                label_data_blocks.append(block_label_data)
                image_data_blocks.append(block_image_data)

        classifier = classifier_factory.create_and_train_pixelwise(
            image_data_blocks, label_data_blocks)
        assert isinstance(classifier, LazyflowPixelwiseClassifierABC), \
            "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
            "".format( type(classifier) )
        result[0] = classifier
        return result

    def propagateDirty(self, slot, subindex, roi):
        print('classifier is dirty...')
        self.Classifier.setDirty()
Example No. 33
0
class OpH5N5WriterBigDataset(Operator):
    name = "H5 and N5 File Writer BigDataset"
    category = "Output"

    h5N5File = InputSlot()  # Must be an already-open hdf5File/n5File (or group) for writing to
    h5N5Path = InputSlot()
    Image = InputSlot()
    # h5py uses single-threaded gzip compression, which really slows down export.
    CompressionEnabled = InputSlot(value=False)
    BatchSize = InputSlot(optional=True)

    WriteImage = OutputSlot()

    loggingName = __name__ + ".OpH5N5WriterBigDataset"
    logger = logging.getLogger(loggingName)
    traceLogger = logging.getLogger("TRACE." + loggingName)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self.d = None
        self.f = None

    def cleanUp(self):
        super().cleanUp()
        # Discard the reference to the dataset, to ensure that the file can be closed.
        self.d = None
        self.f = None
        self.progressSignal.clean()

    def setupOutputs(self):
        self.outputs["WriteImage"].meta.shape = (1,)
        self.outputs["WriteImage"].meta.dtype = object

        self.f = self.inputs["h5N5File"].value
        h5N5Path = self.inputs["h5N5Path"].value

        # On windows, there may be backslashes.
        h5N5Path = h5N5Path.replace("\\", "/")

        h5N5GroupName, datasetName = os.path.split(h5N5Path)
        if h5N5GroupName == "":
            g = self.f
        else:
            if h5N5GroupName in self.f:
                g = self.f[h5N5GroupName]
            else:
                g = self.f.create_group(h5N5GroupName)

        dataShape = self.Image.meta.shape
        self.logger.info(f"Data shape: {dataShape}")

        dtype = self.Image.meta.dtype
        if isinstance(dtype, numpy.dtype):
            # Make sure we're dealing with a type (e.g. numpy.float64),
            # not a numpy.dtype
            dtype = dtype.type
        # Set up our chunk shape: Aim for a cube that's roughly 512k in size
        dtypeBytes = dtype().nbytes

        tagged_maxshape = self.Image.meta.getTaggedShape()
        if "t" in tagged_maxshape:
            # Assume that chunks should not span multiple t-slices,
            # and channels are often handled separately, too.
            tagged_maxshape["t"] = 1

        if "c" in tagged_maxshape:
            tagged_maxshape["c"] = 1

        self.chunkShape = determineBlockShape(list(tagged_maxshape.values()), 512_000.0 / dtypeBytes)

        if datasetName in list(g.keys()):
            del g[datasetName]
        kwargs = {"shape": dataShape, "dtype": dtype, "chunks": self.chunkShape}
        if self.CompressionEnabled.value:
            kwargs["compression"] = "gzip"  # <-- Would be nice to use lzf compression here, but that is h5py-specific.
            if isinstance(self.f, h5py.File):
                kwargs["compression_opts"] = 1  # <-- Optimize for speed, not disk space.
            else:  # z5py uses different names here
                kwargs["level"] = 1  # <-- Optimize for speed, not disk space.
        else:
            if isinstance(self.f, z5py.N5File):  # n5 uses gzip level 5 as default compression.
                kwargs["compression"] = "raw"

        self.d = g.create_dataset(datasetName, **kwargs)

        if self.Image.meta.drange is not None:
            self.d.attrs["drange"] = self.Image.meta.drange
        if self.Image.meta.display_mode is not None:
            self.d.attrs["display_mode"] = self.Image.meta.display_mode

    def execute(self, slot, subindex, rroi, result):
        self.progressSignal(0)

        # Save the axistags as a dataset attribute
        self.d.attrs["axistags"] = self.Image.meta.axistags.toJSON()

        def handle_block_result(roi, data):
            slicing = roiToSlice(*roi)
            if data.flags.c_contiguous:
                self.d.write_direct(data.view(numpy.ndarray), dest_sel=slicing)
            else:
                self.d[slicing] = data

        batch_size = None
        if self.BatchSize.ready():
            batch_size = self.BatchSize.value
        requester = BigRequestStreamer(self.Image, roiFromShape(self.Image.meta.shape), batchSize=batch_size)
        requester.resultSignal.subscribe(handle_block_result)
        requester.progressSignal.subscribe(self.progressSignal)
        requester.execute()

        # Be paranoid: Flush right now.
        if isinstance(self.f, h5py.File):
            self.f.file.flush()  # not available in z5py

        # We're finished.
        result[0] = True

        self.progressSignal(100)

    def propagateDirty(self, slot, subindex, roi):
        # The output from this operator isn't generally connected to other operators.
        # If someone is using it that way, we'll assume that the user wants to know that
        # the input image has become dirty and may need to be written to disk again.
        self.WriteImage.setDirty(slice(None))
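
A minimal sketch of how this writer might be driven, assuming the usual lazyflow pattern of building the operator on a Graph, feeding the input slots with setValue, and then requesting WriteImage; in particular it assumes that setValue on the Image slot picks up shape, dtype and axistags from a VigraArray. File path and array size are arbitrary:

import h5py
import numpy
import vigra
from lazyflow.graph import Graph

graph = Graph()
writer = OpH5N5WriterBigDataset(graph=graph)

# A small tagged test volume; the axistags are assumed to be propagated
# into Image.meta by setValue.
data = vigra.taggedView(
    numpy.random.randint(0, 255, (1, 64, 64, 64, 1), dtype=numpy.uint8), "txyzc")

with h5py.File("/tmp/export.h5", "w") as f:
    writer.h5N5File.setValue(f)                # already-open file (or group)
    writer.h5N5Path.setValue("volume/data")    # group/dataset path inside the file
    writer.CompressionEnabled.setValue(True)   # gzip level 1, see setupOutputs above
    writer.Image.setValue(data)
    assert writer.WriteImage[:].wait()[0]      # streams the blocks to disk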
Example No. 34
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
Example No. 35
0
class OpTrainPixelwiseClassifierBlocked(Operator):
    Images = InputSlot(level=1)
    Labels = InputSlot(level=1)
    ClassifierFactory = InputSlot()
    nonzeroLabelBlocks = InputSlot(level=1)
    MaxLabel = InputSlot()
    
    Classifier = OutputSlot()

    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
    
    def setupOutputs(self):
        for slot in list(self.Images) + list(self.Labels):
            assert slot.meta.getAxisKeys()[-1] == 'c', \
                "This operator assumes channel is the last axis."
        
        self.Classifier.meta.dtype = object
        self.Classifier.meta.shape = (1,)

        # Special metadata for downstream operators using the classifier
        self.Classifier.meta.classifier_factory = self.ClassifierFactory.value

    def cleanUp(self):
        self.progressSignal.clean()
        super( OpTrainPixelwiseClassifierBlocked, self ).cleanUp()

    def execute(self, slot, subindex, roi, result):
        classifier_factory = self.ClassifierFactory.value
        assert issubclass(type(classifier_factory), LazyflowPixelwiseClassifierFactoryABC), \
            "Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
            "".format( type(classifier_factory) )
        
        # Accumulate all non-zero blocks of each image into lists
        label_data_blocks = []
        image_data_blocks = []
        for image_slot, label_slot, nonzero_block_slot in zip(self.Images, self.Labels, self.nonzeroLabelBlocks):
            block_slicings = nonzero_block_slot.value
            for block_slicing in block_slicings:
                block_label_roi = sliceToRoi( block_slicing, image_slot.meta.shape )

                # Ask for the halo needed by the classifier
                axiskeys = image_slot.meta.getAxisKeys()
                halo_shape = classifier_factory.get_halo_shape(axiskeys)
                assert len(halo_shape) == len( block_label_roi[0] )
                assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."

                # Expand block by halo, then clip to image bounds
                block_label_roi = numpy.array( block_label_roi )
                block_label_roi[0] -= halo_shape
                block_label_roi[1] += halo_shape
                block_label_roi = getIntersection( block_label_roi, roiFromShape(image_slot.meta.shape) )

                block_image_roi = numpy.array( block_label_roi )
                assert (block_image_roi[:, -1] == [0,1]).all()
                num_channels = image_slot.meta.shape[-1]
                block_image_roi[:, -1] = [0, num_channels]

                # Ensure the results are plain ndarray, not VigraArray, 
                #  which some classifiers might have trouble with.
                block_label_data = numpy.asarray( label_slot(*block_label_roi).wait() )
                block_image_data = numpy.asarray( image_slot(*block_image_roi).wait() )
                
                label_data_blocks.append( block_label_data )
                image_data_blocks.append( block_image_data )
                
        classifier = classifier_factory.create_and_train_pixelwise( image_data_blocks, label_data_blocks )
        assert issubclass(type(classifier), LazyflowPixelwiseClassifierABC), \
            "Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
            "".format( type(classifier) )
        result[0] = classifier
        return result

    def propagateDirty(self, slot, subindex, roi):
        self.Classifier.setDirty()
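
Here the label ROI is widened by the classifier halo on every axis and then intersected with the full-image ROI, so blocks near the border are simply truncated rather than padded. A small numpy sketch of that ROI arithmetic, with the getIntersection/roiFromShape behaviour reproduced inline instead of imported:

import numpy

image_shape = (100, 100, 3)
halo_shape = numpy.array([16, 16, 0])          # no halo on the channel axis

# ROI convention: [[start, ...], [stop, ...]]
block_roi = numpy.array([[90, 40, 0], [100, 60, 1]])

# Expand by the halo ...
expanded = numpy.array([block_roi[0] - halo_shape, block_roi[1] + halo_shape])

# ... then clip to the image bounds.
full_roi = numpy.array([[0, 0, 0], list(image_shape)])
clipped = numpy.array([numpy.maximum(expanded[0], full_roi[0]),
                       numpy.minimum(expanded[1], full_roi[1])])

print(clipped)  # start becomes [74 24 0]; stop is clipped to [100 76 1]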
Example No. 36
0
class OpH5N5WriterBigDataset(Operator):
    name = "H5 and N5 File Writer BigDataset"
    category = "Output"

    h5N5File = InputSlot()  # Must be an already-open hdf5File/n5File (or group) for writing to
    h5N5Path = InputSlot()
    Image = InputSlot()
    # h5py uses single-threaded gzip compression, which really slows down export.
    CompressionEnabled = InputSlot(value=False)
    BatchSize = InputSlot(optional=True)

    WriteImage = OutputSlot()

    loggingName = __name__ + ".OpH5N5WriterBigDataset"
    logger = logging.getLogger(loggingName)
    traceLogger = logging.getLogger("TRACE." + loggingName)

    def __init__(
        self,
        h5N5File=None,
        h5N5Path=None,
        Image=None,
        BatchSize: int = None,
        CompressionEnabled: bool = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

        self.progressSignal = OrderedSignal()
        self.d = None
        self.f = None

        self.h5N5File.setOrConnectIfAvailable(h5N5File)
        self.h5N5Path.setOrConnectIfAvailable(h5N5Path)
        self.Image.setOrConnectIfAvailable(Image)
        self.BatchSize.setOrConnectIfAvailable(BatchSize)
        self.CompressionEnabled.setOrConnectIfAvailable(CompressionEnabled)

    def cleanUp(self):
        super().cleanUp()
        # Discard the reference to the dataset, to ensure that the file can be closed.
        self.d = None
        self.f = None
        self.progressSignal.clean()

    def setupOutputs(self):
        self.outputs["WriteImage"].meta.shape = (1,)
        self.outputs["WriteImage"].meta.dtype = object

        self.f = self.inputs["h5N5File"].value
        h5N5Path = self.inputs["h5N5Path"].value

        # On windows, there may be backslashes.
        h5N5Path = h5N5Path.replace("\\", "/")

        h5N5GroupName, datasetName = os.path.split(h5N5Path)
        if h5N5GroupName == "":
            g = self.f
        else:
            if h5N5GroupName in self.f:
                g = self.f[h5N5GroupName]
            else:
                g = self.f.create_group(h5N5GroupName)

        dataShape = self.Image.meta.shape
        self.logger.info(f"Data shape: {dataShape}")

        dtype = self.Image.meta.dtype
        if isinstance(dtype, numpy.dtype):
            # Make sure we're dealing with a type (e.g. numpy.float64),
            # not a numpy.dtype
            dtype = dtype.type
        # Set up our chunk shape: Aim for a cube that's roughly 512k in size
        dtypeBytes = dtype().nbytes

        tagged_maxshape = self.Image.meta.getTaggedShape()
        if "t" in tagged_maxshape:
            # Assume that chunks should not span multiple t-slices,
            # and channels are often handled separately, too.
            tagged_maxshape["t"] = 1

        if "c" in tagged_maxshape:
            tagged_maxshape["c"] = 1

        self.chunkShape = determineBlockShape(list(tagged_maxshape.values()), 512_000.0 / dtypeBytes)

        if datasetName in list(g.keys()):
            del g[datasetName]
        kwargs = {"shape": dataShape, "dtype": dtype, "chunks": self.chunkShape}
        if self.CompressionEnabled.value:
            kwargs["compression"] = "gzip"  # <-- Would be nice to use lzf compression here, but that is h5py-specific.
            if isinstance(self.f, h5py.File):
                kwargs["compression_opts"] = 1  # <-- Optimize for speed, not disk space.
            else:  # z5py uses different names here
                kwargs["level"] = 1  # <-- Optimize for speed, not disk space.
        else:
            if isinstance(self.f, z5py.N5File):  # n5 uses gzip level 5 as default compression.
                kwargs["compression"] = "raw"

        self.d = g.create_dataset(datasetName, **kwargs)

        if self.Image.meta.drange is not None:
            self.d.attrs["drange"] = self.Image.meta.drange
        if self.Image.meta.display_mode is not None:
            self.d.attrs["display_mode"] = self.Image.meta.display_mode

    def execute(self, slot, subindex, rroi, result):
        self.progressSignal(0)

        # Save the axistags as a dataset attribute
        self.d.attrs["axistags"] = self.Image.meta.axistags.toJSON()
        if isinstance(self.d, h5py.Dataset):
            for index, tag in enumerate(self.Image.meta.axistags):
                self.d.dims[index].label = tag.key
        else:  # if n5 dataset, apply neuroglancer's axes tags convention
            self.d.attrs["axes"] = "".join(tag.key for tag in self.Image.meta.axistags)[::-1]
        drange = self.Image.meta.get("drange")
        if drange:
            self.d.attrs["drange"] = drange

        def handle_block_result(roi, data):
            slicing = roiToSlice(*roi)
            if data.flags.c_contiguous:
                self.d.write_direct(data.view(numpy.ndarray), dest_sel=slicing)
            else:
                self.d[slicing] = data

        batch_size = None
        if self.BatchSize.ready():
            batch_size = self.BatchSize.value
        requester = BigRequestStreamer(self.Image, roiFromShape(self.Image.meta.shape), batchSize=batch_size)
        requester.resultSignal.subscribe(handle_block_result)
        requester.progressSignal.subscribe(self.progressSignal)
        requester.execute()

        # Be paranoid: Flush right now.
        if isinstance(self.f, h5py.File):
            self.f.file.flush()  # not available in z5py

        # We're finished.
        result[0] = True

        self.progressSignal(100)

    def propagateDirty(self, slot, subindex, roi):
        # The output from this operator isn't generally connected to other operators.
        # If someone is using it that way, we'll assume that the user wants to know that
        # the input image has become dirty and may need to be written to disk again.
        self.WriteImage.setDirty(slice(None))
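
For an N5 dataset, execute above stores the axis keys in reversed order under an "axes" attribute (the comment calls this neuroglancer's convention), instead of labelling HDF5 dimension scales. A one-liner showing the reversal, assuming a vigra axistags object with keys 't', 'z', 'y', 'x', 'c':

import vigra

axistags = vigra.defaultAxistags("tzyxc")
axes = "".join(tag.key for tag in axistags)[::-1]
print(axes)  # "cxyzt" -- the key string reversed, as written to the N5 "axes" attribute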
Example No. 37
0
    def __init__(self, *args, **kwargs):
        super(OpTaskWorker, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self._primaryBlockwiseFileset = None
Example No. 38
0
class OpH5WriterBigDataset(Operator):
    name = "H5 File Writer BigDataset"
    category = "Output"

    inputSlots = [InputSlot("hdf5File"), # Must be an already-open hdf5File (or group) for writing to
                  InputSlot("hdf5Path", stype = "string"),
                  InputSlot("Image"),
                  InputSlot("CompressionEnabled", value=True)]

    outputSlots = [OutputSlot("WriteImage")]

    loggingName = __name__ + ".OpH5WriterBigDataset"
    logger = logging.getLogger(loggingName)
    traceLogger = logging.getLogger("TRACE." + loggingName)

    def __init__(self, *args, **kwargs):
        super(OpH5WriterBigDataset, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self.d = None
        self.f = None

    def cleanUp(self):
        super( OpH5WriterBigDataset, self ).cleanUp()
        # Discard the reference to the dataset, to ensure that hdf5 can close the file.
        self.d = None
        self.f = None
        self.progressSignal.clean()

    def setupOutputs(self):
        self.outputs["WriteImage"].meta.shape = (1,)
        self.outputs["WriteImage"].meta.dtype = object

        self.f = self.inputs["hdf5File"].value
        hdf5Path = self.inputs["hdf5Path"].value
        
        # On windows, there may be backslashes.
        hdf5Path = hdf5Path.replace('\\', '/')

        hdf5GroupName, datasetName = os.path.split(hdf5Path)
        if hdf5GroupName == "":
            g = self.f
        else:
            if hdf5GroupName in self.f:
                g = self.f[hdf5GroupName]
            else:
                g = self.f.create_group(hdf5GroupName)

        dataShape = self.Image.meta.shape
        self.logger.info("Data shape: {}".format(dataShape))

        dtype = self.Image.meta.dtype
        if type(dtype) is numpy.dtype:
            # Make sure we're dealing with a type (e.g. numpy.float64),
            #  not a numpy.dtype
            dtype = dtype.type
        # Set up our chunk shape: Aim for a cube that's roughly 512k in size
        dtypeBytes = dtype().nbytes

        tagged_maxshape = self.Image.meta.getTaggedShape()
        if 't' in tagged_maxshape:
            # Assume that chunks should not span multiple t-slices
            tagged_maxshape['t'] = 1
        
        self.chunkShape = determineBlockShape( list(tagged_maxshape.values()), 512000.0 / dtypeBytes )

        if datasetName in g.keys():
            del g[datasetName]
        kwargs = {'shape': dataShape, 'dtype': dtype, 'chunks': self.chunkShape}
        if self.CompressionEnabled.value:
            kwargs['compression'] = 'gzip' # <-- Would be nice to use lzf compression here, but that is h5py-specific.
            kwargs['compression_opts'] = 1 # <-- Optimize for speed, not disk space.
        self.d = g.create_dataset(datasetName, **kwargs)

        if self.Image.meta.drange is not None:
            self.d.attrs['drange'] = self.Image.meta.drange

    def execute(self, slot, subindex, rroi, result):
        self.progressSignal(0)
        
        # Save the axistags as a dataset attribute
        self.d.attrs['axistags'] = self.Image.meta.axistags.toJSON()

        def handle_block_result(roi, data):
            slicing = roiToSlice(*roi)
            if data.flags.c_contiguous:
                self.d.write_direct(data.view(numpy.ndarray), dest_sel=slicing)
            else:
                self.d[slicing] = data
        requester = BigRequestStreamer( self.Image, roiFromShape( self.Image.meta.shape ) )
        requester.resultSignal.subscribe( handle_block_result )
        requester.progressSignal.subscribe( self.progressSignal )
        requester.execute()            

        # Be paranoid: Flush right now.
        self.f.file.flush()

        # We're finished.
        result[0] = True

        self.progressSignal(100)

    def propagateDirty(self, slot, subindex, roi):
        # The output from this operator isn't generally connected to other operators.
        # If someone is using it that way, we'll assume that the user wants to know that 
        #  the input image has become dirty and may need to be written to disk again.
        self.WriteImage.setDirty(slice(None))
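
The dataset created above is chunked and, when compression is enabled, gzip-compressed at level 1 to favour speed over file size. The same keyword arguments with plain h5py, outside any operator (file path, dataset name and shapes are arbitrary):

import h5py
import numpy

with h5py.File("/tmp/example.h5", "w") as f:
    dset = f.create_dataset(
        "volume/data",
        shape=(1, 256, 256, 256, 1),
        dtype=numpy.uint8,
        chunks=(1, 80, 80, 80, 1),   # ~512 kB per chunk for uint8
        compression="gzip",          # lzf would be faster, but it is h5py-specific
        compression_opts=1,          # gzip level 1: favour speed over size
    )
    dset[0, :80, :80, :80, 0] = numpy.ones((80, 80, 80), dtype=numpy.uint8)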
Example No. 39
0
    def __init__(self, *args, **kwargs):
        super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
Example No. 40
0
    def __init__(self, *args, **kwargs):
        super(OpTrainClassifierFromFeatureVectorsAndSupervoxelMask,
              self).__init__(*args, **kwargs)
        self.trainingCompleteSignal = OrderedSignal()
Example No. 41
0
    def __init__(self, *args, **kwargs):
        super(OpStackWriter, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()