Example #1
 def aaaNumpyFile(self):
     g = graph.Graph()
     npfile = "/home/akreshuk/data/synapse_small_4d.npy"
     reader = OpInputDataReader(graph=g)
     reader.FilePath.setValue(npfile)
     #out = reader.Output[:].wait()
     #print out.shape
     
     opFeatures = OpPixelFeaturesPresmoothed(graph=g)
     opFeatures.Scales.setValue(self.scales)
     opFeatures.FeatureIds.setValue(self.featureIds)
     opFeatures.Input.connect(reader.Output)
     opFeatures.Matrix.setValue(self.selectedFeatures[5])
     out = opFeatures.Output[:].wait()
     print(out.shape)
     
     opFeaturesInterp = OpPixelFeaturesInterpPresmoothed(graph=g)
     opFeaturesInterp.Scales.setValue(self.scales)
     opFeaturesInterp.FeatureIds.setValue(self.featureIds)
     opFeaturesInterp.Input.connect(reader.Output)
     opFeaturesInterp.Matrix.setValue(self.selectedFeatures[5])
     opFeaturesInterp.InterpolationScaleZ.setValue(2)
     out = opFeaturesInterp.Output[:].wait()
     
     print(out.shape)
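
The snippets in this collection all follow the same wiring pattern: build a Graph, set Scales, FeatureIds and a boolean selection matrix (one row per feature, one column per scale), feed an input volume, and request Output. As a compact reference, here is a minimal sketch of that pattern. The import paths, the synthetic tczyx input, and the slot names (SelectionMatrix and ComputeIn2d exist only in newer versions; older ones use Matrix and have no 2D switch) are assumptions that may need adapting to your lazyflow/ilastik version.

import numpy
import vigra
from lazyflow.graph import Graph                            # assumed import path
from lazyflow.operators import OpPixelFeaturesPresmoothed   # may live elsewhere in your checkout

# Synthetic 5D input in tczyx order, as used by the newer examples below.
data = vigra.taggedView(numpy.random.rand(1, 1, 16, 64, 64).astype(numpy.float32), 'tczyx')

op = OpPixelFeaturesPresmoothed(graph=Graph())
op.Scales.setValue([0.7, 1.0, 1.6])
op.FeatureIds.setValue(['GaussianSmoothing', 'LaplacianOfGaussian'])
# One row per feature id, one column per scale.
op.SelectionMatrix.setValue(numpy.array([[True, False, True],
                                         [False, True, False]]))
op.ComputeIn2d.setValue([True, True, True])                 # per-scale 2D/3D switch (newer API only)
op.Input.setValue(data)

features = op.Output[:].wait()    # selected features stacked along the channel axis
print(features.shape)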
Example #2
    def test_compute_in_2d(self):
        op = OpPixelFeaturesPresmoothed(graph=Graph())

        op.Scales.setValue([0.7, 1, 1.6])

        op.FeatureIds.setValue([
            "GaussianSmoothing",
            "LaplacianOfGaussian",
            "StructureTensorEigenvalues",
            "HessianOfGaussianEigenvalues",
            "GaussianGradientMagnitude",
            "DifferenceOfGaussians",
        ])

        op.SelectionMatrix.setValue(
            numpy.array([
                [True, False, True],
                [False, True, False],
                [False, False, True],
                [True, False, False],
                [False, True, False],
                [False, False, True],
            ]))

        # compute result over whole volume in 2d
        op.ComputeIn2d.setValue([True] * 3)
        op.Input.setValue(self.data)
        computed_whole = op.Output[:].wait()

        # compute result for every z slice independently
        z_slices = []
        for z in range(self.data.shape[2]):
            op.Input.setValue(self.data[:, :, z:z + 1])
            z_slices.append(op.Output[:].wait())

        computed_per_slice = numpy.concatenate(z_slices, axis=2)
        assert computed_per_slice.shape == computed_whole.shape

        if DEBUG:
            check_channel = 6
            plt.figure(figsize=(5, 20))
            for z in range(self.data.shape[2]):
                plt.subplot(self.data.shape[2], 2, 2 * z + 1)
                plt.imshow(computed_whole[0, check_channel, z])
                plt.title("whole")
                plt.subplot(self.data.shape[2], 2, 2 * z + 2)
                plt.imshow(computed_per_slice[0, check_channel, z])
                plt.title("per slice")

            plt.show()

        assert computed_whole.shape == computed_per_slice.shape
        assert numpy.allclose(computed_whole, computed_per_slice), \
            abs(computed_whole - computed_per_slice).max()
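
The selection matrix in this test pairs rows with the FeatureIds order and columns with the Scales order. A small helper like the following (hypothetical, not part of the test suite) makes that mapping explicit when building such matrices by hand:

import numpy

def make_selection_matrix(feature_ids, scales, selected):
    """Build a bool matrix with one row per feature id and one column per scale.

    `selected` is an iterable of (feature_id, scale) pairs to enable.
    """
    matrix = numpy.zeros((len(feature_ids), len(scales)), dtype=bool)
    for fid, scale in selected:
        matrix[feature_ids.index(fid), scales.index(scale)] = True
    return matrix

# Enable GaussianSmoothing at 0.7 and 1.6, and LaplacianOfGaussian at 1:
matrix = make_selection_matrix(
    ["GaussianSmoothing", "LaplacianOfGaussian"],
    [0.7, 1, 1.6],
    [("GaussianSmoothing", 0.7), ("GaussianSmoothing", 1.6), ("LaplacianOfGaussian", 1)])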
Example #3
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)
        self.opPixelFeatures.SelectionMatrix.connect(self.SelectionMatrix)
        self.opPixelFeatures.ComputeIn2d.connect(self.ComputeIn2d)
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.AxisOrder.setValue('tczyx')
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)
        self.opReorderLayers = OperatorWrapper(
            OpReorderAxes, parent=self, broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE
Example #4
    def __init__(self, *args, **kwargs):
        super(OpFeatureSelectionNoCache, self).__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)
        self.opReorderLayers = OperatorWrapper(
            OpReorderAxes, parent=self, broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        # We don't connect SelectionMatrix here because we want to
        #  check it for errors (See setupOutputs)
        # self.opPixelFeatures.SelectionMatrix.connect( self.SelectionMatrix )

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE
Example #5
 def runFeatures(self, data, dataInterp):
     g = graph.Graph()
     opFeatures = OpPixelFeaturesPresmoothed(graph=g)
     opFeaturesInterp = OpPixelFeaturesInterpPresmoothed(graph=g)
     
     opFeatures.Input.setValue(dataInterp)
     opFeaturesInterp.Input.setValue(data)
     
     opFeatures.Scales.setValue(self.scales)
     opFeaturesInterp.Scales.setValue(self.scales)
     
     opFeatures.FeatureIds.setValue(self.featureIds)
     opFeaturesInterp.FeatureIds.setValue(self.featureIds)
     
     opFeaturesInterp.InterpolationScaleZ.setValue(self.scaleZ)
     
     #for i, imatrix in enumerate(self.selectedFeatures[0:1]):
     for i, imatrix in enumerate(self.selectedFeatures):
         opFeatures.Matrix.setValue(imatrix)
         opFeaturesInterp.Matrix.setValue(imatrix)
         outputInterpData = opFeatures.Output[:].wait()
         outputInterpFeatures = opFeaturesInterp.Output[:].wait()
         
         for iz in range(self.nz):
         #for iz in range(2, 3):
             #print iz, iz*self.scaleZ
             try:
                 outputInterpDataSlice = opFeatures.Output[:, :, iz*self.scaleZ:iz*self.scaleZ+1, :].wait()
                 outputInterpFeaturesSlice = opFeaturesInterp.Output[:, :, iz, :].wait()
                 assert_array_almost_equal(outputInterpDataSlice, outputInterpFeaturesSlice, 1)
                 assert_array_almost_equal(outputInterpData[:, :, iz*self.scaleZ, 0], outputInterpFeatures[:, :, iz, 0], 1)
                 #assert_array_almost_equal(outputInterpDataSlice[:, :, 0, :], outputInterpData[:, :, iz*self.scaleZ, :], 3)
                 assert_array_almost_equal(outputInterpFeatures[:, :, iz, :], outputInterpFeaturesSlice[:, :, 0, :], 1)
             except AssertionError:
                 print("failed for feature:", imatrix, i)
                 print("failed for slice:", iz)
                 print("inter data:", outputInterpData[:, :, iz*self.scaleZ, 0])
                 print("inter features:", outputInterpFeatures[:, :, iz, 0])
                 print("inter data slice:", outputInterpDataSlice[:, :, 0, 0])
                 print("inter features slice:", outputInterpFeaturesSlice[:, :, 0, 0])
                 raise
Example #6
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)
        self.opPixelFeatures.SelectionMatrix.connect(self.SelectionMatrix)
        self.opPixelFeatures.ComputeIn2d.connect(self.ComputeIn2d)
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.AxisOrder.setValue('tczyx')
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)
        self.opReorderLayers = OperatorWrapper(OpReorderAxes, parent=self,
                                               broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE
Example #7
    def __init__(self, *args, **kwargs):
        super(OpFeatureSelection, self).__init__(*args, **kwargs)

        # Two internal operators: features and cache
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)
        self.opPixelFeatureCache = OpSlicedBlockedArrayCache(parent=self)
        self.opPixelFeatureCache.name = "opPixelFeatureCache"

        # Connect the cache to the feature output
        self.opPixelFeatureCache.Input.connect(self.opPixelFeatures.Output)
        self.opPixelFeatureCache.fixAtCurrent.setValue(False)

        # Connect our internal operators to our external inputs 
        self.opPixelFeatures.Scales.connect( self.Scales )
        self.opPixelFeatures.FeatureIds.connect( self.FeatureIds )
        self.opPixelFeatures.Matrix.connect( self.SelectionMatrix )
        self.opPixelFeatures.Input.connect( self.InputImage )
        
        # Connect our external outputs to our internal operators
        self.OutputImage.connect( self.opPixelFeatures.Output )
        self.CachedOutputImage.connect( self.opPixelFeatureCache.Output )
        self.FeatureLayers.connect( self.opPixelFeatures.Features )
Example #8
 def aaaSlices(self):
     g = graph.Graph()
     opFeatures = OpPixelFeaturesPresmoothed(graph=g)
     opFeatures.Scales.setValue(self.scales)
     opFeatures.FeatureIds.setValue(self.featureIds)
     opFeatures.Input.setValue(self.dataNoChannels)
     for i, imatrix in enumerate(self.selectedFeatures):
         opFeatures.Matrix.setValue(imatrix)
         #compute in one piece
         dataOne = opFeatures.Output[:].wait()
         
         #compute slice-wise
         for z in range(self.nz):
             dataSlice = opFeatures.Output[:, :, z:z+1].wait()
             try:
                 assert_array_almost_equal(dataOne[:, :, z:z+1], dataSlice, 2)
             except AssertionError:
                 print("wrong for matrix:", imatrix)
                 print("wrong for slice:", z)
                 print(dataOne[:, :, z:z+1])
                 print(dataSlice)
                 raise
Example #9
    def __init__(self, *args, **kwargs):
        super(OpFeatureSelectionNoCache, self).__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs 
        self.opPixelFeatures.Scales.connect( self.Scales )
        self.opPixelFeatures.FeatureIds.connect( self.FeatureIds )
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)
        self.opReorderLayers = OperatorWrapper(OpReorderAxes, parent=self,
                                               broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        # We don't connect SelectionMatrix here because we want to 
        #  check it for errors (See setupOutputs)
        # self.opPixelFeatures.SelectionMatrix.connect( self.SelectionMatrix )

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE
Example #10
class OpFeatureSelectionNoCache(Operator):
    """
    The top-level operator for the feature selection applet for headless workflows.
    """
    name = "OpFeatureSelection"
    category = "Top-level"

    FeatureGroups = FeatureGroups
    FeatureNames = FeatureNames

    MinimalFeatures = numpy.zeros((len(FeatureNames), len(defaultScales)),
                                  dtype=bool)
    MinimalFeatures[0, 0] = True

    # Multiple input images
    InputImage = InputSlot()

    # The following input slots are applied uniformly to all input images
    FeatureIds = InputSlot(value=getFeatureIdOrder())   # The list of features to compute
    Scales = InputSlot(value=defaultScales)              # The list of scales to use when computing features
    SelectionMatrix = InputSlot(value=MinimalFeatures)   # A matrix of bools indicating which features to output
    # A list of flags to indicate whether to use a 2d (xy) or a 3d filter for each scale in Scales
    ComputeIn2d = InputSlot(value=[])
    # The SelectionMatrix rows correspond to feature types in the order specified by the FeatureIds input.
    #  (See OpPixelFeaturesPresmoothed for the available feature types.)
    # The SelectionMatrix columns correspond to the scales provided in the Scales input,
    #  which requires that the number of matrix columns must match len(Scales.value)

    FeatureListFilename = InputSlot(stype="str", optional=True)

    # Features are presented in the channels of the output image
    # Output can be optionally accessed via an internal cache.
    # (Training a classifier benefits from caching, but predicting with an existing classifier does not.)
    OutputImage = OutputSlot()

    # For the GUI, we also provide each feature as a separate slot in this multislot
    FeatureLayers = OutputSlot(level=1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)
        self.opPixelFeatures.SelectionMatrix.connect(self.SelectionMatrix)
        self.opPixelFeatures.ComputeIn2d.connect(self.ComputeIn2d)
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.AxisOrder.setValue('tczyx')
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)
        self.opReorderLayers = OperatorWrapper(
            OpReorderAxes, parent=self, broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE

    def setupOutputs(self):
        # drop non-channel singleton axes
        oldAxes = self.InputImage.meta.getAxisKeys()
        # make sure channel axis is present
        if 'c' not in oldAxes:
            oldAxes.append('c')

        self.opReorderOut.AxisOrder.setValue(oldAxes)
        self.opReorderLayers.AxisOrder.setValue(oldAxes)

        # Get features from external file
        if self.FeatureListFilename.ready() and len(
                self.FeatureListFilename.value) > 0:
            raise NotImplementedError('Not simplified yet!')

            self.OutputImage.disconnect()
            self.FeatureLayers.disconnect()

            axistags = self.InputImage.meta.axistags

            with h5py.File(self.FeatureListFilename.value, 'r') as f:
                dset_names = []
                f.visit(dset_names.append)
                if len(dset_names) != 1:
                    sys.stderr.write(
                        "Input external features HDF5 file should have exactly 1 dataset.\n"
                    )
                    sys.exit(1)

                dset = f[dset_names[0]]
                chnum = dset.shape[-1]
                shape = dset.shape
                dtype = dset.dtype.type

            # Set the metadata for FeatureLayers. Unlike OutputImage and CachedOutputImage,
            # FeatureLayers has one slot per channel and therefore the channel dimension must be 1.
            self.FeatureLayers.resize(chnum)
            for i in range(chnum):
                self.FeatureLayers[i].meta.shape = shape[:-1] + (1, )
                self.FeatureLayers[i].meta.dtype = dtype
                self.FeatureLayers[i].meta.axistags = axistags
                self.FeatureLayers[i].meta.display_mode = 'default'
                self.FeatureLayers[i].meta.description = "feature_channel_" + str(i)

            self.OutputImage.meta.shape = shape
            self.OutputImage.meta.dtype = dtype
            self.OutputImage.meta.axistags = axistags

        else:
            invalid_scales, invalid_z_scales = self.opPixelFeatures.getInvalidScales()
            if invalid_scales or invalid_z_scales:
                invalid_z_scales = [
                    s for s in invalid_z_scales if s not in invalid_scales
                ]  # 'do not complain twice'
                msg = 'Some of your selected feature scales are too large for your dataset.\n'
                if invalid_scales:
                    msg += f'Reduce or remove these scales:\n{invalid_scales}\n\n'

                if invalid_z_scales:
                    msg += f'Reduce, remove or switch to 2D computation for these scales:\n{invalid_z_scales}\n\n'

                msg += 'Alternatively use another dataset.'
                if self.parent.parent.featureSelectionApplet._gui is None:
                    # headless
                    fix_dlgs = []
                else:
                    fix_dlgs = [self.parent.parent.featureSelectionApplet._gui.currentGui(
                        fallback_on_lane_0=True).onFeatureButtonClicked]

                raise DatasetConstraintError("Feature Selection",
                                             msg,
                                             fixing_dialogs=fix_dlgs)

            # Connect our external outputs to our internal operators
            self.OutputImage.connect(self.opReorderOut.Output)
            self.FeatureLayers.connect(self.opReorderLayers.Output)

    def propagateDirty(self, slot, subindex, roi):
        # Output slots are directly connected to internal operators
        pass

    def execute(self, slot, subindex, rroi, result):
        if len(self.FeatureListFilename.value) == 0:
            return

        # Set the channel corresponding to the slot(subindex) of the feature layers
        if slot == self.FeatureLayers:
            rroi.start[-1] = subindex[0]
            rroi.stop[-1] = subindex[0] + 1

        key = roiToSlice(rroi.start, rroi.stop)

        # Read features from external file
        with h5py.File(self.FeatureListFilename.value, 'r') as f:
            dset_names = []
            f.visit(dset_names.append)

            if len(dset_names) != 1:
                sys.stderr.write(
                    "Input external features HDF5 file should have exactly 1 dataset."
                )
                return

            dset = f[dset_names[0]]
            result[...] = dset[key]

        return result
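
The FeatureListFilename branch in execute() above expects an HDF5 file containing exactly one dataset, with the feature channels on the last axis. A hypothetical way to produce such a file with h5py (file name, dataset name, and shape are placeholders):

import h5py
import numpy

# Precomputed features with channels on the last axis, matching the input image's shape.
features = numpy.random.rand(1, 64, 64, 16, 12).astype(numpy.float32)

with h5py.File("external_features.h5", "w") as f:
    f.create_dataset("features", data=features)   # exactly one dataset, as the operator checks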
Example #11
    def test(self):
        graph = Graph()

        testVolumePath = 'tinyfib_volume.h5'

        # Unzip the data if necessary
        if not os.path.exists(testVolumePath):
            zippedTestVolumePath = testVolumePath + ".gz"
            assert os.path.exists(zippedTestVolumePath)
            os.system("gzip -d " + zippedTestVolumePath)
            assert os.path.exists(testVolumePath)

        f = h5py.File(testVolumePath, 'r')
        data = f['data'][...]
        data = data.view(vigra.VigraArray)
        data.axistags = vigra.defaultAxistags('txyzc')

        labels = f['labels'][...]
        assert data.shape[:-1] == labels.shape[:-1]
        assert labels.shape[-1] == 1
        assert len(data.shape) == 5
        f.close()
        scales = [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0]
        featureIds = OpPixelFeaturesPresmoothed.DefaultFeatureIds

        # The following conditions cause this test to *usually* fail, but *sometimes* pass:
        # When using Structure Tensor EVs at sigma >= 3.5 (NaNs in feature matrix)
        # When using Gaussian Gradient Mag at sigma >= 3.5 (inf in feature matrix)
        # When using *any feature* at sigma == 10.0 (NaNs in feature matrix)

        #                    sigma:   0.3    0.7    1.0    1.6    3.5    5.0   10.0
        selections = numpy.array([
            [False, False, False, False, False, False, False],
            [False, False, False, False, False, False, False],
            [False, False, False, False, True, False, False],  # ST EVs
            [False, False, False, False, False, False, False],
            [False, False, False, False, False, False, False],  # GGM
            [False, False, False, False, False, False, False]
        ])

        opFeatures = OpPixelFeaturesPresmoothed(graph=graph)
        opFeatures.Input.setValue(data)
        opFeatures.Scales.setValue(scales)
        opFeatures.FeatureIds.setValue(featureIds)
        opFeatures.Matrix.setValue(selections)

        opTrain = OpTrainRandomForestBlocked(graph=graph)
        opTrain.Images.resize(1)
        opTrain.Images[0].connect(opFeatures.Output)
        opTrain.Labels.resize(1)
        opTrain.nonzeroLabelBlocks.resize(1)

        # This test only fails when this flag is True.
        use_sparse_label_storage = True

        if use_sparse_label_storage:
            opLabelArray = OpBlockedSparseLabelArray(graph=graph)
            opLabelArray.inputs["shape"].setValue(labels.shape)
            opLabelArray.inputs["blockShape"].setValue((1, 32, 32, 32, 1))
            opLabelArray.inputs["eraser"].setValue(100)

            opTrain.nonzeroLabelBlocks[0].connect(opLabelArray.nonzeroBlocks)

            # Slice the label data into the sparse array storage
            opLabelArray.Input[...] = labels[...]
            opTrain.Labels[0].connect(opLabelArray.Output)
        else:
            # Skip the sparse storage operator and provide labels as one big block
            opTrain.Labels[0].setValue(labels)
            # One big block
            opTrain.nonzeroLabelBlocks.resize(1)
            opTrain.nonzeroLabelBlocks[0].setValue([[slice(None, None, None)] *
                                                    5])

        # Sanity check: Make sure we configured the training operator correctly.
        readySlots = [slot.ready() for slot in opTrain.inputs.values()]
        assert all(readySlots)

        # Generate the classifier
        classifier = opTrain.Classifier.value
Example #12
class OpFeatureSelectionNoCache(Operator):
    """
    The top-level operator for the feature selection applet for headless workflows.
    """
    name = "OpFeatureSelection"
    category = "Top-level"

    FeatureGroups = FeatureGroups
    FeatureNames = FeatureNames

    MinimalFeatures = numpy.zeros((len(FeatureNames), len(defaultScales)), dtype=bool)

    # Multiple input images
    InputImage = InputSlot()

    # The following input slots are applied uniformly to all input images
    SelectionMatrix = InputSlot()  # A matrix of bools indicating which features to output
    FeatureIds = InputSlot(value=getFeatureIdOrder())   # The list of features to compute
    Scales = InputSlot(value=defaultScales)             # The list of scales to use when computing features
    # A list of flags to indicate whether to use a 2d (xy) or a 3d filter for each scale in Scales
    ComputeIn2d = InputSlot(value=[])
    # The SelectionMatrix rows correspond to feature types in the order specified by the FeatureIds input.
    #  (See OpPixelFeaturesPresmoothed for the available feature types.)
    # The SelectionMatrix columns correspond to the scales provided in the Scales input,
    #  which requires that the number of matrix columns must match len(Scales.value)

    FeatureListFilename = InputSlot(stype="str", optional=True)

    # Features are presented in the channels of the output image
    # Output can be optionally accessed via an internal cache.
    # (Training a classifier benefits from caching, but predicting with an existing classifier does not.)
    OutputImage = OutputSlot()

    # For the GUI, we also provide each feature as a separate slot in this multislot
    FeatureLayers = OutputSlot(level=1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)
        self.opPixelFeatures.SelectionMatrix.connect(self.SelectionMatrix)
        self.opPixelFeatures.ComputeIn2d.connect(self.ComputeIn2d)
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.AxisOrder.setValue('tczyx')
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)
        self.opReorderLayers = OperatorWrapper(OpReorderAxes, parent=self,
                                               broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE

    def setupOutputs(self):
        # drop non-channel singleton axes
        oldAxes = self.InputImage.meta.getAxisKeys()
        assert 'c' in oldAxes

        self.opReorderOut.AxisOrder.setValue(oldAxes)
        self.opReorderLayers.AxisOrder.setValue(oldAxes)

        # Get features from external file
        if self.FeatureListFilename.ready() and len(self.FeatureListFilename.value) > 0:
            raise NotImplementedError('Not simplified yet!')

            self.OutputImage.disconnect()
            self.FeatureLayers.disconnect()

            axistags = self.InputImage.meta.axistags

            with h5py.File(self.FeatureListFilename.value, 'r') as f:
                dset_names = []
                f.visit(dset_names.append)
                if len(dset_names) != 1:
                    sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.\n")
                    sys.exit(1)

                dset = f[dset_names[0]]
                chnum = dset.shape[-1]
                shape = dset.shape
                dtype = dset.dtype.type

            # Set the metadata for FeatureLayers. Unlike OutputImage and CachedOutputImage,
            # FeatureLayers has one slot per channel and therefore the channel dimension must be 1.
            self.FeatureLayers.resize(chnum)
            for i in range(chnum):
                self.FeatureLayers[i].meta.shape = shape[:-1] + (1,)
                self.FeatureLayers[i].meta.dtype = dtype
                self.FeatureLayers[i].meta.axistags = axistags
                self.FeatureLayers[i].meta.display_mode = 'default'
                self.FeatureLayers[i].meta.description = "feature_channel_" + str(i)

            self.OutputImage.meta.shape = shape
            self.OutputImage.meta.dtype = dtype
            self.OutputImage.meta.axistags = axistags

        else:
            invalid_scales, invalid_z_scales = self.opPixelFeatures.getInvalidScales()
            if invalid_scales or invalid_z_scales:
                invalid_z_scales = [s for s in invalid_z_scales if s not in invalid_scales]  # 'do not complain twice'
                msg = 'Some of your selected feature scales are too large for your dataset.\n'
                if invalid_scales:
                    msg += f'Reduce or remove these scales:\n{invalid_scales}\n\n'

                if invalid_z_scales:
                    msg += f'Reduce, remove or switch to 2D computation for these scales:\n{invalid_z_scales}\n\n'

                msg += 'Alternatively use another dataset.'
                if self.parent.parent.featureSelectionApplet._gui is None:
                    # headless
                    fix_dlgs = []
                else:
                    fix_dlgs = [self.parent.parent.featureSelectionApplet._gui.currentGui(
                        fallback_on_lane_0=True).onFeatureButtonClicked]

                raise DatasetConstraintError("Feature Selection", msg, fixing_dialogs=fix_dlgs)

            # Connect our external outputs to our internal operators
            self.OutputImage.connect(self.opReorderOut.Output)
            self.FeatureLayers.connect(self.opReorderLayers.Output)

    def propagateDirty(self, slot, subindex, roi):
        # Output slots are directly connected to internal operators
        pass

    def execute(self, slot, subindex, rroi, result):
        if len(self.FeatureListFilename.value) == 0:
            return

        # Set the channel corresponding to the slot(subindex) of the feature layers
        if slot == self.FeatureLayers:
            rroi.start[-1] = subindex[0]
            rroi.stop[-1] = subindex[0] + 1

        key = roiToSlice(rroi.start, rroi.stop)

        # Read features from external file
        with h5py.File(self.FeatureListFilename.value, 'r') as f:
            dset_names = []
            f.visit(dset_names.append)

            if len(dset_names) != 1:
                sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.")
                return

            dset = f[dset_names[0]]
            result[...] = dset[key]

        return result
Example #13
class OpFeatureSelectionNoCache(Operator):
    """
    The top-level operator for the feature selection applet for headless workflows.
    """
    name = "OpFeatureSelection"
    category = "Top-level"

    ScalesList = ScalesList
    FeatureGroups = FeatureGroups
    FeatureNames = FeatureNames

    MinimalFeatures = numpy.zeros((len(FeatureNames), len(ScalesList)),
                                  dtype=bool)
    MinimalFeatures[0, 0] = True

    # Multiple input images
    InputImage = InputSlot()

    # The following input slots are applied uniformly to all input images
    Scales = InputSlot(value=ScalesList)  # The list of possible scales to use when computing features
    FeatureIds = InputSlot(value=getFeatureIdOrder())  # The list of features to compute
    SelectionMatrix = InputSlot(value=MinimalFeatures)  # A matrix of bools indicating which features to output.
    # The matrix rows correspond to feature types in the order specified by the FeatureIds input.
    #  (See OpPixelFeaturesPresmoothed for the available feature types.)
    # The matrix columns correspond to the scales provided in the Scales input,
    #  which requires that the number of matrix columns must match len(Scales.value)

    FeatureListFilename = InputSlot(stype="str", optional=True)

    # Features are presented in the channels of the output image
    # Output can be optionally accessed via an internal cache.
    # (Training a classifier benefits from caching, but predicting with an existing classifier does not.)
    OutputImage = OutputSlot()

    FeatureLayers = OutputSlot(level=1)  # For the GUI, we also provide each feature as a separate slot in this multislot

    def __init__(self, *args, **kwargs):
        super(OpFeatureSelectionNoCache, self).__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)
        self.opReorderLayers = OperatorWrapper(
            OpReorderAxes, parent=self, broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        # We don't connect SelectionMatrix here because we want to
        #  check it for errors (See setupOutputs)
        # self.opPixelFeatures.SelectionMatrix.connect( self.SelectionMatrix )

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE

    def setupOutputs(self):
        # drop non-channel singleton axes
        allAxes = 'txyzc'
        ts = self.InputImage.meta.getTaggedShape()
        oldAxes = "".join(list(ts.keys()))
        newAxes = "".join(
            [a for a in allAxes if a in ts and ts[a] > 1 or a == 'c'])
        self.opReorderIn.AxisOrder.setValue(newAxes)
        self.opReorderOut.AxisOrder.setValue(oldAxes)
        self.opReorderLayers.AxisOrder.setValue(oldAxes)

        # Get features from external file
        if self.FeatureListFilename.ready() and len(
                self.FeatureListFilename.value) > 0:

            self.OutputImage.disconnect()
            self.FeatureLayers.disconnect()

            axistags = self.InputImage.meta.axistags

            with h5py.File(self.FeatureListFilename.value, 'r') as f:
                dset_names = []
                f.visit(dset_names.append)
                if len(dset_names) != 1:
                    sys.stderr.write(
                        "Input external features HDF5 file should have exactly 1 dataset.\n"
                    )
                    sys.exit(1)

                dset = f[dset_names[0]]
                chnum = dset.shape[-1]
                shape = dset.shape
                dtype = dset.dtype.type

            # Set the metadata for FeatureLayers. Unlike OutputImage and CachedOutputImage,
            # FeatureLayers has one slot per channel and therefore the channel dimension must be 1.
            self.FeatureLayers.resize(chnum)
            for i in range(chnum):
                self.FeatureLayers[i].meta.shape = shape[:-1] + (1, )
                self.FeatureLayers[i].meta.dtype = dtype
                self.FeatureLayers[i].meta.axistags = axistags
                self.FeatureLayers[i].meta.display_mode = 'default'
                self.FeatureLayers[i].meta.description = "feature_channel_" + str(i)

            self.OutputImage.meta.shape = shape
            self.OutputImage.meta.dtype = dtype
            self.OutputImage.meta.axistags = axistags

        else:
            # Set the new selection matrix and check if it creates an error.
            selections = self.SelectionMatrix.value
            self.opPixelFeatures.Matrix.setValue(selections)
            invalid_scales = self.opPixelFeatures.getInvalidScales()
            if invalid_scales:
                msg = "Some of your selected feature scales are too large for your dataset.\n"\
                      "Choose smaller scales (sigma) or use a larger dataset.\n"\
                      "The invalid scales are: {}".format( invalid_scales )
                raise DatasetConstraintError("Feature Selection", msg)

            # Connect our external outputs to our internal operators
            self.OutputImage.connect(self.opReorderOut.Output)
            self.FeatureLayers.connect(self.opReorderLayers.Output)

    def propagateDirty(self, slot, subindex, roi):
        # Output slots are directly connected to internal operators
        pass

    def execute(self, slot, subindex, rroi, result):
        if len(self.FeatureListFilename.value) == 0:
            return

        # Set the channel corresponding to the slot(subindex) of the feature layers
        if slot == self.FeatureLayers:
            rroi.start[-1] = subindex[0]
            rroi.stop[-1] = subindex[0] + 1

        key = roiToSlice(rroi.start, rroi.stop)

        # Read features from external file
        with h5py.File(self.FeatureListFilename.value, 'r') as f:
            dset_names = []
            f.visit(dset_names.append)

            if len(dset_names) != 1:
                sys.stderr.write(
                    "Input external features HDF5 file should have exactly 1 dataset."
                )
                return

            dset = f[dset_names[0]]
            result[...] = dset[key]

        return result
Example #14
class OpFeatureSelectionNoCache(Operator):
    """
    The top-level operator for the feature selection applet for headless workflows.
    """
    name = "OpFeatureSelection"
    category = "Top-level"

    ScalesList = ScalesList
    FeatureGroups = FeatureGroups
    FeatureNames = FeatureNames

    MinimalFeatures = numpy.zeros( (len(FeatureNames), len(ScalesList)), dtype=bool )
    MinimalFeatures[0,0] = True

    # Multiple input images
    InputImage = InputSlot()

    # The following input slots are applied uniformly to all input images
    Scales = InputSlot( value=ScalesList ) # The list of possible scales to use when computing features
    FeatureIds = InputSlot( value=getFeatureIdOrder() ) # The list of features to compute
    SelectionMatrix = InputSlot( value=MinimalFeatures ) # A matrix of bools indicating which features to output.
                                                       # The matrix rows correspond to feature types in the order specified by the FeatureIds input.
                                                       #  (See OpPixelFeaturesPresmoothed for the available feature types.)
                                                       # The matrix columns correspond to the scales provided in the Scales input,
                                                       #  which requires that the number of matrix columns must match len(Scales.value)

    FeatureListFilename = InputSlot(stype="str", optional=True)
    
    # Features are presented in the channels of the output image
    # Output can be optionally accessed via an internal cache.
    # (Training a classifier benefits from caching, but predicting with an existing classifier does not.)
    OutputImage = OutputSlot()

    FeatureLayers = OutputSlot(level=1) # For the GUI, we also provide each feature as a separate slot in this multislot

    def __init__(self, *args, **kwargs):
        super(OpFeatureSelectionNoCache, self).__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs 
        self.opPixelFeatures.Scales.connect( self.Scales )
        self.opPixelFeatures.FeatureIds.connect( self.FeatureIds )
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)
        self.opReorderLayers = OperatorWrapper(OpReorderAxes, parent=self,
                                               broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        # We don't connect SelectionMatrix here because we want to 
        #  check it for errors (See setupOutputs)
        # self.opPixelFeatures.SelectionMatrix.connect( self.SelectionMatrix )

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE

    def setupOutputs(self):
        # drop non-channel singleton axes
        allAxes = 'txyzc'
        ts = self.InputImage.meta.getTaggedShape()
        oldAxes = "".join(list(ts.keys()))
        newAxes = "".join([a for a in allAxes
                           if a in ts and ts[a] > 1 or a == 'c'])
        self.opReorderIn.AxisOrder.setValue(newAxes)
        self.opReorderOut.AxisOrder.setValue(oldAxes)
        self.opReorderLayers.AxisOrder.setValue(oldAxes)
        
        # Get features from external file
        if self.FeatureListFilename.ready() and len(self.FeatureListFilename.value) > 0:
                  
            self.OutputImage.disconnect()
            self.FeatureLayers.disconnect()
            
            axistags = self.InputImage.meta.axistags
                
            with h5py.File(self.FeatureListFilename.value,'r') as f:
                dset_names = []
                f.visit(dset_names.append)
                if len(dset_names) != 1:
                    sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.\n")
                    sys.exit(1)                
                
                dset = f[dset_names[0]]
                chnum = dset.shape[-1]
                shape = dset.shape
                dtype = dset.dtype.type
            
            # Set the metadata for FeatureLayers. Unlike OutputImage and CachedOutputImage, 
            # FeatureLayers has one slot per channel and therefore the channel dimension must be 1.
            self.FeatureLayers.resize(chnum)
            for i in range(chnum):
                self.FeatureLayers[i].meta.shape    = shape[:-1]+(1,)
                self.FeatureLayers[i].meta.dtype    = dtype
                self.FeatureLayers[i].meta.axistags = axistags 
                self.FeatureLayers[i].meta.display_mode = 'default' 
                self.FeatureLayers[i].meta.description = "feature_channel_"+str(i)
            
            self.OutputImage.meta.shape    = shape
            self.OutputImage.meta.dtype    = dtype 
            self.OutputImage.meta.axistags = axistags
            
        else:
            # Set the new selection matrix and check if it creates an error.
            selections = self.SelectionMatrix.value
            self.opPixelFeatures.Matrix.setValue( selections )
            invalid_scales = self.opPixelFeatures.getInvalidScales()
            if invalid_scales:
                msg = "Some of your selected feature scales are too large for your dataset.\n"\
                      "Choose smaller scales (sigma) or use a larger dataset.\n"\
                      "The invalid scales are: {}".format( invalid_scales )                      
                raise DatasetConstraintError( "Feature Selection", msg )
            
            # Connect our external outputs to our internal operators
            self.OutputImage.connect( self.opReorderOut.Output )
            self.FeatureLayers.connect( self.opReorderLayers.Output )

    def propagateDirty(self, slot, subindex, roi):
        # Output slots are directly connected to internal operators
        pass
    
    def execute(self, slot, subindex, rroi, result):
        if len(self.FeatureListFilename.value) == 0:
            return
        
        # Set the channel corresponding to the slot(subindex) of the feature layers
        if slot == self.FeatureLayers:
            rroi.start[-1] = subindex[0]
            rroi.stop[-1] = subindex[0] + 1 
            
        key = roiToSlice(rroi.start, rroi.stop)
        
        # Read features from external file
        with h5py.File(self.FeatureListFilename.value, 'r') as f:
            dset_names = []
            f.visit(dset_names.append)
            
            if len(dset_names) != 1:
                sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.")
                return 
                
            dset = f[dset_names[0]]              
            result[...] = dset[key]
                        
        return result