class OpFeatureSelectionNoCache(Operator):
    """The top-level operator for the feature selection applet for headless workflows.

    Wraps an OpPixelFeaturesPresmoothed instance, normalizing the input axis
    order to 'tczyx' on the way in and restoring the caller's axis order on
    the way out.
    """
    name = "OpFeatureSelection"
    category = "Top-level"

    FeatureGroups = FeatureGroups
    FeatureNames = FeatureNames

    # Default selection matrix: only the first feature at the first scale.
    MinimalFeatures = numpy.zeros((len(FeatureNames), len(defaultScales)), dtype=bool)
    MinimalFeatures[0, 0] = True

    # Multiple input images
    InputImage = InputSlot()

    # The following input slots are applied uniformly to all input images
    FeatureIds = InputSlot(value=getFeatureIdOrder())  # The list of features to compute
    Scales = InputSlot(value=defaultScales)  # The list of scales to use when computing features
    SelectionMatrix = InputSlot(value=MinimalFeatures)  # A matrix of bools indicating which features to output

    # A list of flags to indicate weather to use a 2d (xy) or a 3d filter for each scale in Scales
    ComputeIn2d = InputSlot(value=[])

    # The SelectionMatrix rows correspond to feature types in the order specified by the FeatureIds input.
    # (See OpPixelFeaturesPresmoothed for the available feature types.)
    # The SelectionMatrix columns correspond to the scales provided in the Scales input,
    # which requires that the number of matrix columns must match len(Scales.value)
    FeatureListFilename = InputSlot(stype="str", optional=True)

    # Features are presented in the channels of the output image.
    # Output can be optionally accessed via an internal cache.
    # (Training a classifier benefits from caching, but predicting with an existing classifier does not.)
    OutputImage = OutputSlot()

    # For the GUI, we also provide each feature as a separate slot in this multislot
    FeatureLayers = OutputSlot(level=1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)
        self.opPixelFeatures.SelectionMatrix.connect(self.SelectionMatrix)
        self.opPixelFeatures.ComputeIn2d.connect(self.ComputeIn2d)

        # Normalize the input axis order before computing features.
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.AxisOrder.setValue('tczyx')
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)

        # Restore the caller's axis order on the way out (order set in setupOutputs).
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)

        self.opReorderLayers = OperatorWrapper(
            OpReorderAxes, parent=self, broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE

    def setupOutputs(self):
        # Reorder the outputs back to the input's axis order;
        # make sure a channel axis is present.
        oldAxes = self.InputImage.meta.getAxisKeys()
        if 'c' not in oldAxes:
            oldAxes.append('c')
        self.opReorderOut.AxisOrder.setValue(oldAxes)
        self.opReorderLayers.AxisOrder.setValue(oldAxes)

        if self.FeatureListFilename.ready() and len(self.FeatureListFilename.value) > 0:
            # Loading features from an external file is not supported in this
            # operator revision.
            # FIX: the external-HDF5 metadata code that used to follow this
            # unconditional raise was unreachable dead code and was removed.
            raise NotImplementedError('Not simplified yet!')

        # Validate the selected scales against the dataset size.
        invalid_scales, invalid_z_scales = self.opPixelFeatures.getInvalidScales()
        if invalid_scales or invalid_z_scales:
            # 'do not complain twice'
            invalid_z_scales = [s for s in invalid_z_scales if s not in invalid_scales]

            msg = 'Some of your selected feature scales are too large for your dataset.\n'
            if invalid_scales:
                msg += f'Reduce or remove these scales:\n{invalid_scales}\n\n'
            if invalid_z_scales:
                msg += f'Reduce, remove or switch to 2D computation for these scales:\n{invalid_z_scales}\n\n'
            msg += 'Alternatively use another dataset.'

            if self.parent.parent.featureSelectionApplet._gui is None:  # headless
                fix_dlgs = []
            else:
                fix_dlgs = [self.parent.parent.featureSelectionApplet._gui.currentGui(
                    fallback_on_lane_0=True).onFeatureButtonClicked]
            raise DatasetConstraintError("Feature Selection", msg, fixing_dialogs=fix_dlgs)

        # Connect our external outputs to our internal operators
        self.OutputImage.connect(self.opReorderOut.Output)
        self.FeatureLayers.connect(self.opReorderLayers.Output)

    def propagateDirty(self, slot, subindex, roi):
        # Output slots are directly connected to internal operators
        pass

    def execute(self, slot, subindex, rroi, result):
        # Only meaningful when features come from an external file; otherwise
        # the output slots are connected and this is never invoked.
        if len(self.FeatureListFilename.value) == 0:
            return

        # Set the channel corresponding to the slot(subindex) of the feature layers
        if slot == self.FeatureLayers:
            rroi.start[-1] = subindex[0]
            rroi.stop[-1] = subindex[0] + 1
        key = roiToSlice(rroi.start, rroi.stop)

        # Read features from external file
        with h5py.File(self.FeatureListFilename.value, 'r') as f:
            dset_names = []
            f.visit(dset_names.append)
            if len(dset_names) != 1:
                sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.")
                return
            dset = f[dset_names[0]]
            result[...] = dset[key]
        return result
class OpFeatureSelectionNoCache(Operator):
    """The top-level operator for the feature selection applet for headless workflows."""

    name = "OpFeatureSelection"
    category = "Top-level"

    ScalesList = ScalesList
    FeatureGroups = FeatureGroups
    FeatureNames = FeatureNames

    # Default selection: only the first feature at the first scale is enabled.
    MinimalFeatures = numpy.zeros((len(FeatureNames), len(ScalesList)), dtype=bool)
    MinimalFeatures[0, 0] = True

    # Multiple input images
    InputImage = InputSlot()

    # The following input slots are applied uniformly to all input images
    Scales = InputSlot(value=ScalesList)  # The list of possible scales to use when computing features
    FeatureIds = InputSlot(value=getFeatureIdOrder())  # The list of features to compute
    SelectionMatrix = InputSlot(value=MinimalFeatures)  # A matrix of bools indicating which features to output.
    # The matrix rows correspond to feature types in the order specified by the FeatureIds input.
    # (See OpPixelFeaturesPresmoothed for the available feature types.)
    # The matrix columns correspond to the scales provided in the Scales input,
    # which requires that the number of matrix columns must match len(Scales.value)
    FeatureListFilename = InputSlot(stype="str", optional=True)

    # Features are presented in the channels of the output image.
    # Output can be optionally accessed via an internal cache.
    # (Training a classifier benefits from caching, but predicting with an existing classifier does not.)
    OutputImage = OutputSlot()

    # For the GUI, we also provide each feature as a separate slot in this multislot
    FeatureLayers = OutputSlot(level=1)

    def __init__(self, *args, **kwargs):
        super(OpFeatureSelectionNoCache, self).__init__(*args, **kwargs)

        # The operator that actually computes the pixel features.
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Wire our external inputs into the feature operator.
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)

        # Reorder axes on the way in; the target order is chosen in setupOutputs.
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)

        # Reorder axes back to the caller's order on the way out.
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)

        self.opReorderLayers = OperatorWrapper(
            OpReorderAxes, parent=self, broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        # SelectionMatrix is deliberately NOT connected here: setupOutputs
        # forwards its value manually so it can be validated for errors first.

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE

    def setupOutputs(self):
        # Drop non-channel singleton axes for the internal computation,
        # but always keep the channel axis.
        tagged = self.InputImage.meta.getTaggedShape()
        original_order = "".join(tagged.keys())
        reduced_order = "".join(
            a for a in 'txyzc' if (a in tagged and tagged[a] > 1) or a == 'c')
        self.opReorderIn.AxisOrder.setValue(reduced_order)
        self.opReorderOut.AxisOrder.setValue(original_order)
        self.opReorderLayers.AxisOrder.setValue(original_order)

        if self.FeatureListFilename.ready() and len(self.FeatureListFilename.value) > 0:
            # Features come from an external HDF5 file instead of being computed.
            self.OutputImage.disconnect()
            self.FeatureLayers.disconnect()
            axistags = self.InputImage.meta.axistags

            with h5py.File(self.FeatureListFilename.value, 'r') as f:
                dataset_names = []
                f.visit(dataset_names.append)
                if len(dataset_names) != 1:
                    sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.\n")
                    sys.exit(1)
                dataset = f[dataset_names[0]]
                num_channels = dataset.shape[-1]
                shape = dataset.shape
                dtype = dataset.dtype.type

            # Set the metadata for FeatureLayers. Unlike OutputImage and CachedOutputImage,
            # FeatureLayers has one slot per channel and therefore the channel dimension must be 1.
            self.FeatureLayers.resize(num_channels)
            for i in range(num_channels):
                layer_meta = self.FeatureLayers[i].meta
                layer_meta.shape = shape[:-1] + (1,)
                layer_meta.dtype = dtype
                layer_meta.axistags = axistags
                layer_meta.display_mode = 'default'
                layer_meta.description = "feature_channel_" + str(i)

            self.OutputImage.meta.shape = shape
            self.OutputImage.meta.dtype = dtype
            self.OutputImage.meta.axistags = axistags
        else:
            # Push the new selection matrix into the feature operator and
            # check whether it produces an error for this dataset.
            self.opPixelFeatures.Matrix.setValue(self.SelectionMatrix.value)
            invalid_scales = self.opPixelFeatures.getInvalidScales()
            if invalid_scales:
                msg = "Some of your selected feature scales are too large for your dataset.\n"\
                      "Choose smaller scales (sigma) or use a larger dataset.\n"\
                      "The invalid scales are: {}".format(invalid_scales)
                raise DatasetConstraintError("Feature Selection", msg)

            # Connect our external outputs to our internal operators
            self.OutputImage.connect(self.opReorderOut.Output)
            self.FeatureLayers.connect(self.opReorderLayers.Output)

    def propagateDirty(self, slot, subindex, roi):
        # Output slots are directly connected to internal operators
        pass

    def execute(self, slot, subindex, rroi, result):
        # Only meaningful when features come from an external file.
        if len(self.FeatureListFilename.value) == 0:
            return

        # Select the channel corresponding to the requested feature-layer subslot.
        if slot == self.FeatureLayers:
            rroi.start[-1] = subindex[0]
            rroi.stop[-1] = subindex[0] + 1
        key = roiToSlice(rroi.start, rroi.stop)

        # Read features from external file
        with h5py.File(self.FeatureListFilename.value, 'r') as f:
            dataset_names = []
            f.visit(dataset_names.append)
            if len(dataset_names) != 1:
                sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.")
                return
            result[...] = f[dataset_names[0]][key]
        return result
class OpFeatureSelectionNoCache(Operator):
    """The top-level operator for the feature selection applet for headless workflows.

    Wraps an OpPixelFeaturesPresmoothed instance, normalizing the input axis
    order to 'tczyx' on the way in and restoring the caller's axis order on
    the way out.
    """
    name = "OpFeatureSelection"
    category = "Top-level"

    FeatureGroups = FeatureGroups
    FeatureNames = FeatureNames

    # All-False matrix; note this revision selects no feature by default.
    MinimalFeatures = numpy.zeros((len(FeatureNames), len(defaultScales)), dtype=bool)

    # Multiple input images
    InputImage = InputSlot()

    # The following input slots are applied uniformly to all input images
    SelectionMatrix = InputSlot()  # A matrix of bools indicating which features to output
    FeatureIds = InputSlot(value=getFeatureIdOrder())  # The list of features to compute
    Scales = InputSlot(value=defaultScales)  # The list of scales to use when computing features

    # A list of flags to indicate weather to use a 2d (xy) or a 3d filter for each scale in Scales
    ComputeIn2d = InputSlot(value=[])

    # The SelectionMatrix rows correspond to feature types in the order specified by the FeatureIds input.
    # (See OpPixelFeaturesPresmoothed for the available feature types.)
    # The SelectionMatrix columns correspond to the scales provided in the Scales input,
    # which requires that the number of matrix columns must match len(Scales.value)
    FeatureListFilename = InputSlot(stype="str", optional=True)

    # Features are presented in the channels of the output image.
    # Output can be optionally accessed via an internal cache.
    # (Training a classifier benefits from caching, but predicting with an existing classifier does not.)
    OutputImage = OutputSlot()

    # For the GUI, we also provide each feature as a separate slot in this multislot
    FeatureLayers = OutputSlot(level=1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Create the operator that actually generates the features
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Connect our internal operators to our external inputs
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)
        self.opPixelFeatures.SelectionMatrix.connect(self.SelectionMatrix)
        self.opPixelFeatures.ComputeIn2d.connect(self.ComputeIn2d)

        # Normalize the input axis order before computing features.
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.AxisOrder.setValue('tczyx')
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)

        # Restore the caller's axis order on the way out (order set in setupOutputs).
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)

        self.opReorderLayers = OperatorWrapper(
            OpReorderAxes, parent=self, broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE

    def setupOutputs(self):
        # Reorder the outputs back to the input's axis order.
        # This revision requires the input to already carry a channel axis.
        oldAxes = self.InputImage.meta.getAxisKeys()
        assert 'c' in oldAxes
        self.opReorderOut.AxisOrder.setValue(oldAxes)
        self.opReorderLayers.AxisOrder.setValue(oldAxes)

        if self.FeatureListFilename.ready() and len(self.FeatureListFilename.value) > 0:
            # Loading features from an external file is not supported in this
            # operator revision.
            # FIX: the external-HDF5 metadata code that used to follow this
            # unconditional raise was unreachable dead code and was removed.
            raise NotImplementedError('Not simplified yet!')

        # Validate the selected scales against the dataset size.
        invalid_scales, invalid_z_scales = self.opPixelFeatures.getInvalidScales()
        if invalid_scales or invalid_z_scales:
            # 'do not complain twice'
            invalid_z_scales = [s for s in invalid_z_scales if s not in invalid_scales]

            msg = 'Some of your selected feature scales are too large for your dataset.\n'
            if invalid_scales:
                msg += f'Reduce or remove these scales:\n{invalid_scales}\n\n'
            if invalid_z_scales:
                msg += f'Reduce, remove or switch to 2D computation for these scales:\n{invalid_z_scales}\n\n'
            msg += 'Alternatively use another dataset.'

            if self.parent.parent.featureSelectionApplet._gui is None:  # headless
                fix_dlgs = []
            else:
                fix_dlgs = [self.parent.parent.featureSelectionApplet._gui.currentGui(
                    fallback_on_lane_0=True).onFeatureButtonClicked]
            raise DatasetConstraintError("Feature Selection", msg, fixing_dialogs=fix_dlgs)

        # Connect our external outputs to our internal operators
        self.OutputImage.connect(self.opReorderOut.Output)
        self.FeatureLayers.connect(self.opReorderLayers.Output)

    def propagateDirty(self, slot, subindex, roi):
        # Output slots are directly connected to internal operators
        pass

    def execute(self, slot, subindex, rroi, result):
        # Only meaningful when features come from an external file; otherwise
        # the output slots are connected and this is never invoked.
        if len(self.FeatureListFilename.value) == 0:
            return

        # Set the channel corresponding to the slot(subindex) of the feature layers
        if slot == self.FeatureLayers:
            rroi.start[-1] = subindex[0]
            rroi.stop[-1] = subindex[0] + 1
        key = roiToSlice(rroi.start, rroi.stop)

        # Read features from external file
        with h5py.File(self.FeatureListFilename.value, 'r') as f:
            dset_names = []
            f.visit(dset_names.append)
            if len(dset_names) != 1:
                sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.")
                return
            dset = f[dset_names[0]]
            result[...] = dset[key]
        return result
class OpFeatureSelectionNoCache(Operator):
    """The top-level operator for the feature selection applet for headless workflows."""

    name = "OpFeatureSelection"
    category = "Top-level"

    ScalesList = ScalesList
    FeatureGroups = FeatureGroups
    FeatureNames = FeatureNames

    # Default selection: only the first feature at the first scale is enabled.
    MinimalFeatures = numpy.zeros((len(FeatureNames), len(ScalesList)), dtype=bool)
    MinimalFeatures[0, 0] = True

    # Multiple input images
    InputImage = InputSlot()

    # The following input slots are applied uniformly to all input images
    Scales = InputSlot(value=ScalesList)  # The list of possible scales to use when computing features
    FeatureIds = InputSlot(value=getFeatureIdOrder())  # The list of features to compute
    SelectionMatrix = InputSlot(value=MinimalFeatures)  # A matrix of bools indicating which features to output.
    # The matrix rows correspond to feature types in the order specified by the FeatureIds input.
    # (See OpPixelFeaturesPresmoothed for the available feature types.)
    # The matrix columns correspond to the scales provided in the Scales input,
    # which requires that the number of matrix columns must match len(Scales.value)
    FeatureListFilename = InputSlot(stype="str", optional=True)

    # Features are presented in the channels of the output image.
    # Output can be optionally accessed via an internal cache.
    # (Training a classifier benefits from caching, but predicting with an existing classifier does not.)
    OutputImage = OutputSlot()

    # For the GUI, we also provide each feature as a separate slot in this multislot
    FeatureLayers = OutputSlot(level=1)

    def __init__(self, *args, **kwargs):
        super(OpFeatureSelectionNoCache, self).__init__(*args, **kwargs)

        # The operator that actually computes the pixel features.
        self.opPixelFeatures = OpPixelFeaturesPresmoothed(parent=self)

        # Wire our external inputs into the feature operator.
        self.opPixelFeatures.Scales.connect(self.Scales)
        self.opPixelFeatures.FeatureIds.connect(self.FeatureIds)

        # Reorder axes on the way in; target order chosen in setupOutputs.
        self.opReorderIn = OpReorderAxes(parent=self)
        self.opReorderIn.Input.connect(self.InputImage)
        self.opPixelFeatures.Input.connect(self.opReorderIn.Output)

        # Reorder axes back to the caller's order on the way out.
        self.opReorderOut = OpReorderAxes(parent=self)
        self.opReorderOut.Input.connect(self.opPixelFeatures.Output)

        self.opReorderLayers = OperatorWrapper(
            OpReorderAxes, parent=self, broadcastingSlotNames=["AxisOrder"])
        self.opReorderLayers.Input.connect(self.opPixelFeatures.Features)

        # SelectionMatrix is deliberately NOT connected here: setupOutputs
        # forwards its value manually so it can be validated for errors first.

        self.WINDOW_SIZE = self.opPixelFeatures.WINDOW_SIZE

    def setupOutputs(self):
        # Drop non-channel singleton axes for the internal computation,
        # but always keep the channel axis.
        allAxes = 'txyzc'
        tagged_shape = self.InputImage.meta.getTaggedShape()
        outer_order = "".join(tagged_shape.keys())
        inner_order = "".join(
            [a for a in allAxes if (a in tagged_shape and tagged_shape[a] > 1) or a == 'c'])
        self.opReorderIn.AxisOrder.setValue(inner_order)
        self.opReorderOut.AxisOrder.setValue(outer_order)
        self.opReorderLayers.AxisOrder.setValue(outer_order)

        if self.FeatureListFilename.ready() and len(self.FeatureListFilename.value) > 0:
            # Features come from an external HDF5 file instead of being computed.
            self.OutputImage.disconnect()
            self.FeatureLayers.disconnect()
            axistags = self.InputImage.meta.axistags

            with h5py.File(self.FeatureListFilename.value, 'r') as f:
                names = []
                f.visit(names.append)
                if len(names) != 1:
                    sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.\n")
                    sys.exit(1)
                dataset = f[names[0]]
                channel_count = dataset.shape[-1]
                shape = dataset.shape
                dtype = dataset.dtype.type

            # Set the metadata for FeatureLayers. Unlike OutputImage and CachedOutputImage,
            # FeatureLayers has one slot per channel and therefore the channel dimension must be 1.
            self.FeatureLayers.resize(channel_count)
            for c in range(channel_count):
                meta = self.FeatureLayers[c].meta
                meta.shape = shape[:-1] + (1,)
                meta.dtype = dtype
                meta.axistags = axistags
                meta.display_mode = 'default'
                meta.description = "feature_channel_" + str(c)

            self.OutputImage.meta.shape = shape
            self.OutputImage.meta.dtype = dtype
            self.OutputImage.meta.axistags = axistags
        else:
            # Push the new selection matrix into the feature operator and
            # check whether it produces an error for this dataset.
            self.opPixelFeatures.Matrix.setValue(self.SelectionMatrix.value)
            invalid_scales = self.opPixelFeatures.getInvalidScales()
            if invalid_scales:
                msg = "Some of your selected feature scales are too large for your dataset.\n"\
                      "Choose smaller scales (sigma) or use a larger dataset.\n"\
                      "The invalid scales are: {}".format(invalid_scales)
                raise DatasetConstraintError("Feature Selection", msg)

            # Connect our external outputs to our internal operators
            self.OutputImage.connect(self.opReorderOut.Output)
            self.FeatureLayers.connect(self.opReorderLayers.Output)

    def propagateDirty(self, slot, subindex, roi):
        # Output slots are directly connected to internal operators
        pass

    def execute(self, slot, subindex, rroi, result):
        # Only meaningful when features come from an external file.
        if len(self.FeatureListFilename.value) == 0:
            return

        # Select the channel corresponding to the requested feature-layer subslot.
        if slot == self.FeatureLayers:
            rroi.start[-1] = subindex[0]
            rroi.stop[-1] = subindex[0] + 1
        key = roiToSlice(rroi.start, rroi.stop)

        # Read features from external file
        with h5py.File(self.FeatureListFilename.value, 'r') as f:
            names = []
            f.visit(names.append)
            if len(names) != 1:
                sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.")
                return
            result[...] = f[names[0]][key]
        return result