def __init__(self, *args, **kwargs):
    super(OpPixelFeaturesPresmoothed, self).__init__(*args, **kwargs)

    # Set up basic operators
    self.stacker = OpMultiArrayStacker(parent=self)
    self.multi = Op50ToMulti(parent=self)
    self.stacker.Images.connect(self.multi.Outputs)

    self.smoother = OpGaussianSmoothing(parent=self)
    self.smoother.Input.connect(self.Input)

    # Defaults
    self.inputs["FeatureIds"].setValue(self.DefaultFeatureIds)
    self.destSigma = 1.0
    self.windowSize = 4.0
def connectLane(self, laneIndex):
    # Get a handle to each operator
    opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
    opFirstFeatures = self.featureSelectionApplets[0].topLevelOperator.getLane(laneIndex)
    opFirstClassify = self.pcApplets[0].topLevelOperator.getLane(laneIndex)
    opFinalClassify = self.pcApplets[-1].topLevelOperator.getLane(laneIndex)
    opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

    def checkConstraints(*_):
        # if (opData.Image.meta.dtype in [np.uint8, np.uint16]) == False:
        #     msg = "The Autocontext Workflow only supports 8-bit images (UINT8 pixel type)\n"\
        #           "or 16-bit images (UINT16 pixel type)\n"\
        #           "Your image has a pixel type of {}. Please convert your data to UINT8 and try again."\
        #           .format( str(np.dtype(opData.Image.meta.dtype)) )
        #     raise DatasetConstraintError( "Autocontext Workflow", msg, unfixable=True )
        pass

    opData.Image.notifyReady(checkConstraints)

    # Input Image -> Feature Op
    #           and -> Classification Op (for display)
    opFirstFeatures.InputImage.connect(opData.Image)
    opFirstClassify.InputImages.connect(opData.Image)

    # Feature Images -> Classification Op (for training, prediction)
    opFirstClassify.FeatureImages.connect(opFirstFeatures.OutputImage)
    opFirstClassify.CachedFeatureImages.connect(opFirstFeatures.CachedOutputImage)

    upstreamPcApplets = self.pcApplets[0:-1]
    downstreamFeatureApplets = self.featureSelectionApplets[1:]
    downstreamPcApplets = self.pcApplets[1:]

    for (upstreamPcApplet, downstreamFeaturesApplet, downstreamPcApplet) in zip(
        upstreamPcApplets, downstreamFeatureApplets, downstreamPcApplets
    ):
        opUpstreamClassify = upstreamPcApplet.topLevelOperator.getLane(laneIndex)
        opDownstreamFeatures = downstreamFeaturesApplet.topLevelOperator.getLane(laneIndex)
        opDownstreamClassify = downstreamPcApplet.topLevelOperator.getLane(laneIndex)

        # Connect label inputs (all are connected together).
        # opDownstreamClassify.LabelInputs.connect( opUpstreamClassify.LabelInputs )

        # Connect data path
        assert opData.Image.meta.dtype == opUpstreamClassify.PredictionProbabilitiesAutocontext.meta.dtype, (
            "Probability dtype needs to match up with input image dtype, got: "
            f"input: {opData.Image.meta.dtype} "
            f"probabilities: {opUpstreamClassify.PredictionProbabilitiesAutocontext.meta.dtype}"
        )
        opStacker = OpMultiArrayStacker(parent=self)
        opStacker.Images.resize(2)
        opStacker.Images[0].connect(opData.Image)
        opStacker.Images[1].connect(opUpstreamClassify.PredictionProbabilitiesAutocontext)
        opStacker.AxisFlag.setValue("c")

        opDownstreamFeatures.InputImage.connect(opStacker.Output)
        opDownstreamClassify.InputImages.connect(opStacker.Output)
        opDownstreamClassify.FeatureImages.connect(opDownstreamFeatures.OutputImage)
        opDownstreamClassify.CachedFeatureImages.connect(opDownstreamFeatures.CachedOutputImage)

    # Data Export connections
    opDataExport.RawData.connect(opData.ImageGroup[self.DATA_ROLE_RAW])
    opDataExport.RawDatasetInfo.connect(opData.DatasetGroup[self.DATA_ROLE_RAW])
    opDataExport.ConstraintDataset.connect(opData.ImageGroup[self.DATA_ROLE_RAW])
    opDataExport.Inputs.resize(len(self.EXPORT_NAMES))

    for reverse_stage_index, (stage_index, pcApplet) in enumerate(reversed(list(enumerate(self.pcApplets)))):
        opPc = pcApplet.topLevelOperator.getLane(laneIndex)
        num_items_per_stage = len(self.EXPORT_NAMES_PER_STAGE)
        opDataExport.Inputs[num_items_per_stage * reverse_stage_index + 0].connect(opPc.HeadlessPredictionProbabilities)
        opDataExport.Inputs[num_items_per_stage * reverse_stage_index + 1].connect(opPc.SimpleSegmentation)
        opDataExport.Inputs[num_items_per_stage * reverse_stage_index + 2].connect(opPc.HeadlessUncertaintyEstimate)
        opDataExport.Inputs[num_items_per_stage * reverse_stage_index + 3].connect(opPc.FeatureImages)
        opDataExport.Inputs[num_items_per_stage * reverse_stage_index + 4].connect(opPc.LabelImages)
        # Input must come last due to an assumption in PixelClassificationDataExportGui
        opDataExport.Inputs[num_items_per_stage * reverse_stage_index + 5].connect(opPc.InputImages)

    # One last export slot for all probabilities, all stages
    opAllStageStacker = OpMultiArrayStacker(parent=self)
    opAllStageStacker.Images.resize(len(self.pcApplets))
    for stage_index, pcApplet in enumerate(self.pcApplets):
        opPc = pcApplet.topLevelOperator.getLane(laneIndex)
        opAllStageStacker.Images[stage_index].connect(opPc.HeadlessPredictionProbabilities)
    opAllStageStacker.AxisFlag.setValue("c")

    # The ideal_blockshape metadata field will be bogus, so just eliminate it
    # (Otherwise, the channels could be split up in an unfortunate way.)
    opMetadataOverride = OpMetadataInjector(parent=self)
    opMetadataOverride.Input.connect(opAllStageStacker.Output)
    opMetadataOverride.Metadata.setValue({"ideal_blockshape": None})

    opDataExport.Inputs[-1].connect(opMetadataOverride.Output)

    for slot in opDataExport.Inputs:
        assert slot.upstream_slot is not None
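# Not from the source: a minimal sketch of the channel-stacking pattern used in
# connectLane() above, assuming the lazyflow graph/slot API shown in these snippets.
# OpArrayPiper stands in for the real upstream operators (raw image and predictions);
# OpMultiArrayStacker concatenates its inputs along the existing "c" (channel) axis.
import numpy
import vigra
from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper, OpMultiArrayStacker

graph = Graph()

# Two single-channel 2D images tagged with "yxc" axes
raw = vigra.taggedView(numpy.zeros((10, 10, 1), dtype=numpy.float32), "yxc")
probabilities = vigra.taggedView(numpy.ones((10, 10, 1), dtype=numpy.float32), "yxc")

opRaw = OpArrayPiper(graph=graph)
opRaw.Input.setValue(raw)
opProb = OpArrayPiper(graph=graph)
opProb.Input.setValue(probabilities)

opStacker = OpMultiArrayStacker(graph=graph)
opStacker.AxisFlag.setValue("c")      # concatenate along the channel axis
opStacker.Images.resize(2)            # one subslot per upstream image
opStacker.Images[0].connect(opRaw.Output)
opStacker.Images[1].connect(opProb.Output)

stacked = opStacker.Output[:].wait()
print(stacked.shape)                  # expected (10, 10, 2): channels concatenated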
def __init__(self, *args, **kwargs):
    super(OpTiffSequenceReader, self).__init__(*args, **kwargs)
    self._readers = []

    # Stack the per-file readers along a new outermost axis,
    # and expose the stacked volume as this operator's Output.
    self._opStacker = OpMultiArrayStacker(parent=self)
    self._opStacker.AxisIndex.setValue(0)
    self.Output.connect(self._opStacker.Output)
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._h5N5File = None
    self._readers = []
    self._opStacker = OpMultiArrayStacker(parent=self)
    self._opStacker.AxisIndex.setValue(0)
def __init__(self, *args, **kwargs):
    super(OpStreamingHdf5SequenceReaderM, self).__init__(*args, **kwargs)
    self._hdf5Files = []
    self._readers = []
    self._opStacker = OpMultiArrayStacker(parent=self)
    self._opStacker.AxisIndex.setValue(0)
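# Not from the source: a minimal sketch of the sequence-reader pattern above, under the
# same assumptions. Each per-file/per-dataset reader is stood in for by an OpArrayPiper,
# and OpMultiArrayStacker inserts a new outermost axis (AxisIndex 0, hypothetically
# named "z") along which the individual slices are stacked.
import numpy
import vigra
from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper, OpMultiArrayStacker

graph = Graph()

# Stand-ins for per-file readers: two 2D slices tagged "yx"
slice_ops = []
for i in range(2):
    op = OpArrayPiper(graph=graph)
    op.Input.setValue(vigra.taggedView(numpy.full((4, 5), i, dtype=numpy.uint8), "yx"))
    slice_ops.append(op)

opStacker = OpMultiArrayStacker(graph=graph)
opStacker.AxisFlag.setValue("z")              # name of the new stacking axis (assumed)
opStacker.AxisIndex.setValue(0)               # insert it as the outermost axis, as above
opStacker.Images.resize(len(slice_ops))
for i, op in enumerate(slice_ops):
    opStacker.Images[i].connect(op.Output)

stacked = opStacker.Output[:].wait()
print(stacked.shape)                          # expected (2, 4, 5): one entry per "file"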