Example #1

def __init__(self,
                 shell,
                 headless,
                 workflow_cmdline_args,
                 project_creation_args,
                 appendBatchOperators=True,
                 *args,
                 **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(AutocontextClassificationWorkflow,
              self).__init__(shell,
                             headless,
                             workflow_cmdline_args,
                             project_creation_args,
                             graph=graph,
                             *args,
                             **kwargs)
        self._applets = []

        ## Create applets
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.featureSelectionApplet = FeatureSelectionApplet(
            self, "Feature Selection", "FeatureSelections")
        self.pcApplet = AutocontextClassificationApplet(
            self, "PixelClassification")

        # Autocontext: run two classification rounds, feeding the first
        # round's predictions back in as extra features for the second.
        opClassifyTopLevel = self.pcApplet.topLevelOperator
        opClassifyTopLevel.AutocontextIterations.setValue(2)

        # Expose for shell
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.pcApplet)

        if appendBatchOperators:
            # Create applets for batch workflow
            self.batchInputApplet = DataSelectionApplet(
                self,
                "Batch Prediction Input Selections",
                "BatchDataSelection",
                supportIlastik05Import=False,
                batchDataGui=True)
            self.batchResultsApplet = BatchIoApplet(
                self, "Batch Prediction Output Locations")

            # Expose in shell
            self._applets.append(self.batchInputApplet)
            self._applets.append(self.batchResultsApplet)

            # Connect batch workflow (NOT lane-based)
            self._initBatchWorkflow()
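
Nearly every snippet on this page follows the same skeleton: create one Graph shared by all operators, pass it to the Workflow base class, instantiate applets with the workflow as their parent, and append them to self._applets in the order the shell should display them. A condensed sketch of that pattern (MinimalWorkflow is a hypothetical name; the imports reflect the usual ilastik/lazyflow module layout):

from lazyflow.graph import Graph
from ilastik.workflow import Workflow
from ilastik.applets.dataSelection import DataSelectionApplet

class MinimalWorkflow(Workflow):
    # Sketch only: real subclasses also implement connectLane() and the
    # imageNameListSlot property required by the Workflow base class.
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):
        # One Graph instance is shared by every operator in the workflow.
        graph = Graph()
        super(MinimalWorkflow, self).__init__(
            shell, headless, workflow_cmdline_args, project_creation_args,
            graph=graph, *args, **kwargs)
        self._applets = []

        # Applets take the workflow (self) as their parent.
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data", "Input Data")
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Raw Data"])

        # The shell displays applets in list order.
        self._applets.append(self.dataSelectionApplet)

    @property
    def applets(self):
        return self._applets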
Example #2

    def __init__(self,
                 headless=False,
                 workflow_cmdline_args=(),
                 parent=None,
                 graph=None):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(LabelImageViewerWorkflow,
              self).__init__(headless=headless,
                             workflow_cmdline_args=workflow_cmdline_args,
                             parent=parent,
                             graph=graph)
        self._applets = []

        # Two data selection applets...
        self.rawDataSelectionApplet = DataSelectionApplet(
            self,
            "Raw Data",
            "Raw Data",
            supportIlastik05Import=False,
            batchDataGui=False)
        self.labelDataSelectionApplet = DataSelectionApplet(
            self,
            "Label Image",
            "Label Image",
            supportIlastik05Import=False,
            batchDataGui=False)
        self.viewerApplet = LabelImageViewerApplet(self)

        # Expose for shell
        self._applets.append(self.rawDataSelectionApplet)
        self._applets.append(self.labelDataSelectionApplet)
        self._applets.append(self.viewerApplet)
Example #3
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(
            shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs
        )
        self._applets = []

        # Instantiate DataSelection applet
        self.dataSelectionApplet = DataSelectionApplet(
            self, "Input Data", "Input Data", supportIlastik05Import=True, forceAxisOrder=None
        )

        # Configure global DataSelection settings
        role_names = ["Input Data"]
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(role_names)

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(["Input"])

        # No special data pre/post processing necessary in this workflow,
        #   but this is where we'd hook it up if we needed it.
        #
        # self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        # self.dataExportApplet.prepare_lane_for_export = self.prepare_lane_for_export
        # self.dataExportApplet.post_process_lane_export = self.post_process_lane_export
        # self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Instantiate BatchProcessing applet
        self.batchProcessingApplet = BatchProcessingApplet(
            self, "Batch Processing", self.dataSelectionApplet, self.dataExportApplet
        )

        # Expose our applets in a list (for the shell to use)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(workflow_cmdline_args)
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args(
                unused_args, role_names
            )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format(unused_args))
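
Note the parsing idiom: each applet's parser consumes only the flags it recognizes and hands the rest on, so unused_args is threaded from one parse_known_cmdline_args call to the next and anything left over is reported. The same chaining with plain argparse (parser contents are illustrative):

import argparse

export_parser = argparse.ArgumentParser(add_help=False)
export_parser.add_argument("--output_format")

input_parser = argparse.ArgumentParser(add_help=False)
input_parser.add_argument("--input_files", nargs="*")

cmdline = ["--input_files", "a.png", "b.png", "--output_format", "hdf5", "--bogus"]
export_args, remaining = export_parser.parse_known_args(cmdline)
input_args, remaining = input_parser.parse_known_args(remaining)
if remaining:
    print("Unused command-line args: {}".format(remaining))  # ['--bogus']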
Example #4
    def __init__(self,
                 headless,
                 workflow_cmdline_args,
                 appendBatchOperators=True,
                 *args,
                 **kwargs):
        graph = kwargs.pop('graph', None) or Graph()
        super(CountingWorkflow, self).__init__(headless,
                                               graph=graph,
                                               *args,
                                               **kwargs)

        ######################
        # Interactive workflow
        ######################

        self.projectMetadataApplet = ProjectMetadataApplet()

        self.dataSelectionApplet = DataSelectionApplet(self,
                                                       "Data Selection",
                                                       "DataSelection",
                                                       batchDataGui=False,
                                                       force5d=False)
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(['Raw Data'])

        self.featureSelectionApplet = FeatureSelectionApplet(
            self, "Feature Selection", "FeatureSelections")

        #self.pcApplet = PixelClassificationApplet(self, "PixelClassification")
        self.countingApplet = CountingApplet(workflow=self)

        self._applets = []
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.countingApplet)

        if appendBatchOperators:
            # Create applets for batch workflow
            self.batchInputApplet = DataSelectionApplet(
                self,
                "Batch Prediction Input Selections",
                "BatchDataSelection",
                supportIlastik05Import=False,
                batchDataGui=True)
            self.batchResultsApplet = CountingBatchResultsApplet(
                self, "Batch Prediction Output Locations")

            # Expose in shell
            self._applets.append(self.batchInputApplet)
            self._applets.append(self.batchResultsApplet)

            # Connect batch workflow (NOT lane-based)
            self._initBatchWorkflow()
Example #5
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell,
                                                     headless,
                                                     workflow_cmdline_args,
                                                     project_creation_args,
                                                     graph=graph,
                                                     *args,
                                                     **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False,
            force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        role_names = ["Input Data"]
        opDataSelection.DatasetRoles.setValue(role_names)

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(["Input"])

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.dataExportApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        self._workflow_cmdline_args = workflow_cmdline_args
        self._data_input_args = None
        self._data_export_args = None
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(
                workflow_cmdline_args)
            self._data_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args(
                unused_args, role_names)
            if unused_args:
                logger.warning("Unused command-line args: {}".format(unused_args))
Example #6

    def __init__(self, headless, workflow_cmdline_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(headless,
                                                     graph=graph,
                                                     *args,
                                                     **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False,
            force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Input Data"])

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.dataExportApplet)

        self._workflow_cmdline_args = workflow_cmdline_args
Example #7
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(ThresholdMaskingWorkflow, self).__init__(shell,
                                                       headless,
                                                       workflow_cmdline_args,
                                                       project_creation_args,
                                                       graph=graph)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.thresholdMaskingApplet = ThresholdMaskingApplet(
            self, "Thresholding", "Thresholding Stage 1")
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(self.ROLE_NAMES)

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(self.EXPORT_NAMES)

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.thresholdMaskingApplet)
        self._applets.append(self.dataExportApplet)
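
ROLE_NAMES and EXPORT_NAMES here are class-level constants on the workflow rather than locals. A sketch of plausible definitions for this workflow (the values are illustrative assumptions, not taken from the original source):

class ThresholdMaskingWorkflow(Workflow):
    # One entry per dataset role shown in the Input Data applet.
    ROLE_NAMES = ['Raw Data']
    # One entry per image the Data Export applet can produce.
    EXPORT_NAMES = ['Thresholded Output']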
Example #8
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(LayerViewerWorkflow, self).__init__(shell,
                                                  headless,
                                                  workflow_cmdline_args,
                                                  project_creation_args,
                                                  graph=graph,
                                                  *args,
                                                  **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            forceAxisOrder='tzyxc')
        self.viewerApplet = LayerViewerApplet(self)
        self.dataExportApplet = DataExportApplet(self, "Data Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.SelectionNames.setValue(['Raw Data', 'Other Data'])

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Raw Data", "Other Data"])

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.viewerApplet)
        self._applets.append(self.dataExportApplet)

        self._workflow_cmdline_args = workflow_cmdline_args
Example #9
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_workflow, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()

        super(VigraWatershedWorkflow, self).__init__(shell,
                                                     headless,
                                                     workflow_cmdline_args,
                                                     project_creation_workflow,
                                                     graph=graph,
                                                     *args,
                                                     **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.watershedApplet = VigraWatershedViewerApplet(
            self, "Watershed", "Watershed")

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(['Raw Data'])

        # Expose to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.watershedApplet)
Example #10
    def __init__(self, carvingGraphFile):
        super(CarvingWorkflow, self).__init__()
        self._applets = []

        graph = Graph()
        self._graph = graph

        ## Create applets 
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet(graph, "Input Data", "Input Data", supportIlastik05Import=True, batchDataGui=False)

        self.carvingApplet = CarvingApplet(graph, "xxx", carvingGraphFile)
        self.carvingApplet.topLevelOperator.RawData.connect( self.dataSelectionApplet.topLevelOperator.Image )
        self.carvingApplet.topLevelOperator.opLabeling.LabelsAllowedFlags.connect( self.dataSelectionApplet.topLevelOperator.AllowLabels )
        self.carvingApplet.gui.minLabelNumber = 2
        self.carvingApplet.gui.maxLabelNumber = 2

        ## Access applet operators
        opData = self.dataSelectionApplet.topLevelOperator
        
        ## Connect operators ##
        
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.carvingApplet)

        # The shell needs a slot from which it can read the list of image names to switch between.
        # Use an OpAttributeSelector to create a slot containing just the filename from the OpDataSelection's DatasetInfo slot.
        opSelectFilename = OperatorWrapper( OpAttributeSelector, graph=graph )
        opSelectFilename.InputObject.connect( opData.Dataset )
        opSelectFilename.AttributeName.setValue( 'filePath' )

        self._imageNameListSlot = opSelectFilename.Result
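
A usage note on the wrapper above: OperatorWrapper promotes the single-lane OpAttributeSelector to a multi-lane operator, so Result is a list-like multi-slot with one filename per loaded image. Reading it back might look like this (a sketch, assuming every subslot is ready):

filenames = [subslot.value for subslot in opSelectFilename.Result]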
Example #11
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(LayerViewerWorkflow, self).__init__(shell,
                                                  headless,
                                                  workflow_cmdline_args,
                                                  project_creation_args,
                                                  graph=graph,
                                                  *args,
                                                  **kwargs)
        self._applets = []

        # Create applets (dataset roles come from the ROLES constant used below)
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data",
                                                       "Input Data")
        self.viewerApplet = LayerViewerApplet(self)
        self.dataExportApplet = DataExportApplet(self, "Data Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.SelectionNames.setValue(ROLES)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(ROLES)

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.viewerApplet)
        self._applets.append(self.dataExportApplet)

        self._workflow_cmdline_args = workflow_cmdline_args
Example #12
    def setupInputs(self):
        data_instructions = 'Use the "Raw Data" tab on the right to load your intensity image(s).'
        
        self.dataSelectionApplet = DataSelectionApplet( self, 
                                                        "Input Data", 
                                                        "Input Data", 
                                                        batchDataGui=False,
                                                        forceAxisOrder=None, 
                                                        instructionText=data_instructions )
        opData = self.dataSelectionApplet.topLevelOperator
        opData.DatasetRoles.setValue(['Raw Data'])

        self.featureSelectionApplet = FeatureSelectionApplet(
            self,
            "Feature Selection",
            "FeatureSelections",
        )

        self.pcApplet = PixelClassificationApplet(
            self, "PixelClassification")
        self.thresholdingApplet = ThresholdTwoLevelsApplet(
            self, "Thresholding", "ThresholdTwoLevels")

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.pcApplet)
        self._applets.append(self.thresholdingApplet)

        if not self._headless:
            self._shell.currentAppletChanged.connect( self.handle_applet_changed )
Example #13
    def __init__(self, headless, workflow_cmdline_args, *args, **kwargs):
        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(LayerViewerWorkflow, self).__init__(headless, graph=graph, *args, **kwargs)
        self._applets = []

        # Create applets 
        self.dataSelectionApplet = DataSelectionApplet(self, 
                                                       "Input Data", 
                                                       "Input Data", 
                                                       supportIlastik05Import=True, 
                                                       batchDataGui=False,
                                                       force5d=True)
        self.viewerApplet = LayerViewerApplet(self)
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( ["Raw Data", "Other Data"] )

        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.viewerApplet )
        self._applets.append( self.dataExportApplet )

        self._workflow_cmdline_args = workflow_cmdline_args
Example #14
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self._applets = []

        # Instantiate DataSelection applet
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            forceAxisOrder=None)

        # Configure global DataSelection settings
        role_names = ["Input Data"]
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( role_names )

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( ["Input"] )        

        # No special data pre/post processing necessary in this workflow, 
        #   but this is where we'd hook it up if we needed it.
        #
        #self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        #self.dataExportApplet.prepare_lane_for_export = self.prepare_lane_for_export
        #self.dataExportApplet.post_process_lane_export = self.post_process_lane_export
        #self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Instantiate BatchProcessing applet
        self.batchProcessingApplet = BatchProcessingApplet(self, 
                                                           "Batch Processing", 
                                                           self.dataSelectionApplet, 
                                                           self.dataExportApplet)

        # Expose our applets in a list (for the shell to use)
        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.dataExportApplet )
        self._applets.append(self.batchProcessingApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, role_names )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))
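
Both DataConversionWorkflow examples stash the parsed arguments and apply them later, in onProjectLoaded(). A hedged sketch of what that hook typically looks like in workflows with a BatchProcessingApplet (the two method calls follow ilastik naming conventions, but treat the exact API as an assumption):

    def onProjectLoaded(self, projectManager):
        # Apply --output_format, --output_filename_format, etc. to the export applet.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args(self._data_export_args)

        # In headless mode, process every file given on the command line and exit.
        if self._headless and self._batch_input_args and self._data_export_args:
            self.batchProcessingApplet.run_export_from_parsed_args(self._batch_input_args)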
Example #15

    def setupInputs(self):
        data_instructions = 'Use the "Raw Data" tab on the right to load your intensity image(s).'
        
        self.dataSelectionApplet = DataSelectionApplet( self, 
                                                        "Input Data", 
                                                        "Input Data", 
                                                        batchDataGui=False,
                                                        forceAxisOrder=None, 
                                                        instructionText=data_instructions )
        opData = self.dataSelectionApplet.topLevelOperator
        opData.DatasetRoles.setValue(['Raw Data'])

        self.featureSelectionApplet = FeatureSelectionApplet(
            self,
            "Feature Selection",
            "FeatureSelections",
            filter_implementation=self.filter_implementation
        )

        self.pcApplet = PixelClassificationApplet(
            self, "PixelClassification")
        self.thresholdingApplet = ThresholdTwoLevelsApplet(
            self, "Thresholding", "ThresholdTwoLevels")

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.pcApplet)
        self._applets.append(self.thresholdingApplet)
Example #16
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(FillMissingSlicesWorkflow, self).__init__(shell,
                                                        headless,
                                                        workflow_cmdline_args,
                                                        project_creation_args,
                                                        graph=graph,
                                                        *args,
                                                        **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.fillMissingSlicesApplet = FillMissingSlicesApplet(
            self, "Fill Missing Slices", "Fill Missing Slices")

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.fillMissingSlicesApplet)
Example #17
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(LabelingWorkflow, self).__init__(shell,
                                               headless,
                                               workflow_cmdline_args,
                                               project_creation_args,
                                               graph=graph,
                                               *args,
                                               **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.labelingSingleLaneApplet = LabelingSingleLaneApplet(
            self, "Generic Labeling (single-lane)")
        self.labelingMultiLaneApplet = LabelingApplet(
            self, "Generic Labeling (multi-lane)")

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Raw Data"])

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.labelingSingleLaneApplet)
        self._applets.append(self.labelingMultiLaneApplet)
Example #18

    def __init__( self, headless, workflow_cmdline_args, *args, **kwargs ):
        graph = kwargs.pop('graph', None) or Graph()
        super(ChaingraphTrackingWorkflow, self).__init__(headless=headless, graph=graph, *args, **kwargs)
        data_instructions = 'Use the "Raw Data" tab to load your intensity image(s).\n\n'\
                            'Use the "Prediction Maps" tab to load your pixel-wise probability image(s).'
        ## Create applets 
        self.dataSelectionApplet = DataSelectionApplet(self,
                                                       "Input Data",
                                                       "Input Data",
                                                       batchDataGui=False,
                                                       force5d=True,
                                                       instructionText=data_instructions,
                                                       max_lanes=1 )

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( ['Raw Data', 'Prediction Maps'] )
                
        self.thresholdTwoLevelsApplet = ThresholdTwoLevelsApplet( self, 
                                                                  "Threshold and Size Filter", 
                                                                  "ThresholdTwoLevels" )
        
        self.objectExtractionApplet = ObjectExtractionApplet( name="Object Feature Computation",
                                                              workflow=self, interactive=False )
        
        self.trackingApplet = ChaingraphTrackingApplet( workflow=self )
        
        self._applets = []                
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.thresholdTwoLevelsApplet)
        self._applets.append(self.objectExtractionApplet)
        self._applets.append(self.trackingApplet)
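
The applets above are only constructed here; per-lane slot wiring happens in the workflow's connectLane() override, which this excerpt omits. A hedged sketch of what such wiring typically looks like in the ilastik tracking workflows (the slot names are illustrative assumptions):

    def connectLane(self, laneIndex):
        opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opThreshold = self.thresholdTwoLevelsApplet.topLevelOperator.getLane(laneIndex)
        opExtraction = self.objectExtractionApplet.topLevelOperator.getLane(laneIndex)
        opTracking = self.trackingApplet.topLevelOperator.getLane(laneIndex)

        # The two dataset roles feed different stages of the pipeline.
        opThreshold.InputImage.connect(opData.ImageGroup[1])   # 'Prediction Maps' role
        opExtraction.RawImage.connect(opData.ImageGroup[0])    # 'Raw Data' role
        opExtraction.BinaryImage.connect(opThreshold.Output)
        opTracking.RawImage.connect(opData.ImageGroup[0])
        opTracking.LabelImage.connect(opExtraction.LabelImage)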
Example #19

    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(PredictionViewerWorkflow, self).__init__(shell,
                                                       headless,
                                                       workflow_cmdline_args,
                                                       project_creation_args,
                                                       graph=graph,
                                                       *args,
                                                       **kwargs)
        self._applets = []

        # Applets for training (interactive) workflow
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Viewed Predictions",
            "Viewed Predictions",
            supportIlastik05Import=False,
            batchDataGui=False)
        self.viewerApplet = PredictionViewerApplet(self)

        # Expose for shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.viewerApplet)
Example #20
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DeviationFromMeanWorkflow, self).__init__(shell,
                                                        headless,
                                                        workflow_cmdline_args,
                                                        project_creation_args,
                                                        graph=graph)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.deviationFromMeanApplet = DeviationFromMeanApplet(
            self, "Deviation From Mean")
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(['Raw Data'])

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.deviationFromMeanApplet)
Example #21

    def __init__(self):
        super(VigraWatershedWorkflow, self).__init__()
        self._applets = []

        # Create a graph to be shared by all operators
        graph = Graph()
        self._graph = graph

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            graph,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.watershedApplet = VigraWatershedViewerApplet(
            graph, "Watershed", "Watershed")

        # Connect top-level operators
        self.watershedApplet.topLevelOperator.InputImage.connect(
            self.dataSelectionApplet.topLevelOperator.Image)

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.watershedApplet)

        # The shell needs a slot from which it can read the list of image names to switch between.
        # Use an OpAttributeSelector to create a slot containing just the filename from the OpDataSelection's DatasetInfo slot.
        opSelectFilename = OperatorWrapper(OpAttributeSelector, graph=graph)
        opSelectFilename.InputObject.connect(
            self.dataSelectionApplet.topLevelOperator.Dataset)
        opSelectFilename.AttributeName.setValue('filePath')

        self._imageNameListSlot = opSelectFilename.Result
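
connect() does the heavy lifting in all of these examples: it wires an upstream output slot into a downstream input, propagates metadata at setup time, and pulls data lazily on demand. A toy operator pair demonstrating the mechanism on a shared graph (a minimal sketch against the lazyflow API):

import numpy
from lazyflow.graph import Graph, Operator, InputSlot, OutputSlot

class OpPassthrough(Operator):
    Input = InputSlot()
    Output = OutputSlot()

    def setupOutputs(self):
        # Metadata flows downstream when slots are connected.
        self.Output.meta.assignFrom(self.Input.meta)

    def execute(self, slot, subindex, roi, result):
        # Data is pulled lazily, one request at a time.
        result[...] = self.Input.get(roi).wait()
        return result

    def propagateDirty(self, slot, subindex, roi):
        self.Output.setDirty(roi)

graph = Graph()
opA = OpPassthrough(graph=graph)
opB = OpPassthrough(graph=graph)
opB.Input.connect(opA.Output)        # same pattern as InputImage.connect(...) above
opA.Input.setValue(numpy.zeros((4, 4)))
data = opB.Output[:].wait()          # demand-driven pull through both operators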
Example #22

    def __init__(
        self,
        shell,
        headless,
        workflow_cmdline_args,
        project_creation_args,
        hintoverlayFile=None,
        pmapoverlayFile=None,
        *args,
        **kwargs,
    ):
        if workflow_cmdline_args:
            assert False, "Not using workflow cmdline args yet."

        if hintoverlayFile is not None:
            assert isinstance(hintoverlayFile, str), "hintoverlayFile should be a string, not '%s'" % type(
                hintoverlayFile
            )
        if pmapoverlayFile is not None:
            assert isinstance(pmapoverlayFile, str), "pmapoverlayFile should be a string, not '%s'" % type(
                pmapoverlayFile
            )

        graph = Graph()

        super(CarvingFromPixelPredictionsWorkflow, self).__init__(
            shell, headless, workflow_cmdline_args, project_creation_args, *args, graph=graph, **kwargs
        )

        ## Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self, "Input Data", "Input Data", supportIlastik05Import=True, batchDataGui=False
        )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Raw Data"])

        self.featureSelectionApplet = FeatureSelectionApplet(self, "Feature Selection", "FeatureSelections")
        self.pixelClassificationApplet = PixelClassificationApplet(self, "PixelClassification")

        self.carvingApplet = CarvingApplet(
            workflow=self,
            projectFileGroupName="carving",
            hintOverlayFile=hintoverlayFile,
            pmapOverlayFile=pmapoverlayFile,
        )

        self.preprocessingApplet = PreprocessingApplet(
            workflow=self, title="Preprocessing", projectFileGroupName="carving"
        )

        # self.carvingApplet.topLevelOperator.MST.connect(self.preprocessingApplet.topLevelOperator.PreprocessedData)

        # Expose to shell
        self._applets = []
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.pixelClassificationApplet)
        self._applets.append(self.preprocessingApplet)
        self._applets.append(self.carvingApplet)
Example #23
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):
        graph = kwargs.pop('graph', None) or Graph()
        super(ManualTrackingWorkflow, self).__init__(shell,
                                                     headless,
                                                     workflow_cmdline_args,
                                                     project_creation_args,
                                                     graph=graph,
                                                     *args,
                                                     **kwargs)

        data_instructions = 'Use the "Raw Data" tab to load your intensity image(s).\n\n'\
                            'Use the "Prediction Maps" tab to load your pixel-wise probability image(s).'
        ## Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            forceAxisOrder=['txyzc'],
            instructionText=data_instructions,
            max_lanes=1)
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(['Raw Data', 'Prediction Maps'])

        self.thresholdTwoLevelsApplet = ThresholdTwoLevelsApplet(
            self, "Threshold and Size Filter", "ThresholdTwoLevels")

        self.objectExtractionApplet = ObjectExtractionApplet(
            name="Object Feature Computation",
            workflow=self,
            interactive=False)

        self.trackingApplet = ManualTrackingApplet(workflow=self)
        self.default_export_filename = '{dataset_dir}/{nickname}-exported_data.csv'
        self.dataExportApplet = TrackingBaseDataExportApplet(
            self,
            "Tracking Result Export",
            default_export_filename=self.default_export_filename,
        )

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.SelectionNames.setValue(
            ['Manual Tracking', 'Object Identities'])
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)

        # Extra configuration for object export table (as CSV table or HDF5 table)
        opTracking = self.trackingApplet.topLevelOperator
        self.dataExportApplet.set_exporting_operator(opTracking)
        #self.dataExportApplet.post_process_lane_export = self.post_process_lane_export

        self._applets = []
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.thresholdTwoLevelsApplet)
        self._applets.append(self.objectExtractionApplet)
        self._applets.append(self.trackingApplet)
        self._applets.append(self.dataExportApplet)
Example #24
    def createDataSelectionApplet(self):
        """
        Can be overridden by subclasses, if they want to use
        special parameters to initialize the DataSelectionApplet.
        """
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right"
        return DataSelectionApplet(
            self, "Input Data", "Input Data", supportIlastik05Import=True, instructionText=data_instructions
        )
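
For example, a subclass that needs a fixed axis order could override the factory like this (a sketch; forceAxisOrder is the same keyword used in the LayerViewerWorkflow example above):

    def createDataSelectionApplet(self):
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right"
        return DataSelectionApplet(
            self, "Input Data", "Input Data",
            supportIlastik05Import=True,
            instructionText=data_instructions,
            forceAxisOrder='tzyxc')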
Example #25
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):
        super(BlockwiseObjectClassificationWorkflow,
              self).__init__(shell, headless, workflow_cmdline_args,
                             project_creation_args, *args, **kwargs)

        ### INTERACTIVE ###

        # Create applet
        self.blockwiseObjectClassificationApplet = BlockwiseObjectClassificationApplet(
            self, "Blockwise Object Classification",
            "Blockwise Object Classification")

        # Expose for shell
        self._applets.append(self.blockwiseObjectClassificationApplet)

        ### BATCH ###

        # Create applets for batch workflow
        self.rawBatchInputApplet = DataSelectionApplet(
            self,
            "Raw Batch Input Selections",
            "RawBatchDataSelection",
            supportIlastik05Import=False,
            batchDataGui=True)

        # Create applets for batch workflow
        self.binaryBatchInputApplet = DataSelectionApplet(
            self,
            "Binary Batch Input Selections",
            "BinaryBatchDataSelection",
            supportIlastik05Import=False,
            batchDataGui=True)

        self.batchResultsApplet = BlockwiseObjectClassificationBatchApplet(
            self, "Batch Output Locations")

        # Expose in shell
        self._applets.append(self.rawBatchInputApplet)
        self._applets.append(self.binaryBatchInputApplet)
        self._applets.append(self.batchResultsApplet)

        # Connect batch workflow (NOT lane-based)
        self._initBatchWorkflow()
Example #26
    def parse_known_cmdline_args(self, cmdline_args):
        # We use the same parser as the DataSelectionApplet
        parser = DataSelectionApplet.get_arg_parser(
            self.dataSelectionApplet.role_names)
        parser.add_argument(
            "--distributed",
            help="Distributed mode. Used for running ilastik on HPCs via SLURM/srun/mpirun",
            action="store_true",
        )

        default_block_roi = Slice5D.all(x=slice(0, 256),
                                        y=slice(0, 256),
                                        z=slice(0, 256),
                                        t=slice(0, 1))

        def parse_distributed_block_roi(value: str) -> Slice5D:
            parsed = ast.literal_eval(value)
            if isinstance(parsed, dict):
                if not set(parsed.keys()).issubset("xyztc"):
                    raise ValueError(
                        f"Bad keys for distributed-block-roi: {value}")
                if not all(
                        isinstance(v, (int, type(None)))
                        for v in parsed.values()):
                    raise ValueError(
                        f"Bad values for distributed-block-roi: {value}")
                overrides = {k: slice(0, None if v is None else int(v)) for k, v in parsed.items()}
            elif isinstance(parsed, int):
                overrides = {k: slice(0, parsed) for k in "xyz"}
            else:
                raise TypeError(
                    f"Could not convert value {value} into a Slice5D")

            return Slice5D(**{**default_block_roi.to_dict(), **overrides})

        parser.add_argument(
            "--distributed_block_roi",
            "--distributed-block-roi",
            help=textwrap.dedent("""
                Determines the dimensions of the blocks used to split the data in distributed mode.
                Values can be either:
                    An integer, which will be interpreted as if the following dict was passed in:
                    {'x': value, 'y': value, 'z': value, 't': 1, 'c': None}

                    or a literal python Dict[str, Optional[int]], with keys in 'xyztc'.
                    Missing keys will default like so:
                    {'x': 256, 'y': 256, 'z': 256, 't': 1, 'c': None}
                    Use None anywhere in the dict to mean "the whole dimension".
                """),
            type=parse_distributed_block_roi,
            default=default_block_roi,
        )

        parsed_args, unused_args = parser.parse_known_args(cmdline_args)
        return parsed_args, unused_args
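
For orientation, this is how the two accepted forms are interpreted (conceptual only; parse_distributed_block_roi is local to the method above):

#   --distributed_block_roi 128
#       -> x, y, z each become slice(0, 128); t keeps its default slice(0, 1)
#
#   --distributed_block_roi "{'x': 512, 'z': None}"
#       -> x becomes slice(0, 512), z spans the whole dimension,
#          and unmentioned axes keep their defaults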
Example #27
    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.

        If the user provided command-line arguments, apply them to the workflow operators.
        Currently, we support command-line configuration of:
        - DataSelection
        - Preprocessing, in which case preprocessing is immediately executed
        """
        # If input data files were provided on the command line, configure the DataSelection applet now.
        # (Otherwise, we assume the project already had a dataset selected.)
        input_data_args, unused_args = DataSelectionApplet.parse_known_cmdline_args(self.workflow_cmdline_args, DATA_ROLES)
        if input_data_args.raw_data:
            self.dataSelectionApplet.configure_operator_with_parsed_args(input_data_args)

        #
        # Parse the remaining cmd-line arguments
        #
        filter_indexes = { 'bright-lines' : OpFilter.HESSIAN_BRIGHT,
                           'dark-lines'   : OpFilter.HESSIAN_DARK,
                           'step-edges'   : OpFilter.STEP_EDGES,
                           'original'     : OpFilter.RAW,
                           'inverted'     : OpFilter.RAW_INVERTED }

        parser = argparse.ArgumentParser()
        parser.add_argument('--run-preprocessing', action='store_true')
        parser.add_argument('--preprocessing-sigma', type=float, required=False)
        parser.add_argument('--preprocessing-filter', required=False, type=str.lower,
                            choices=filter_indexes.keys())

        parsed_args, unused_args = parser.parse_known_args(unused_args)
        if unused_args:
            logger.warning("Did not use the following command-line arguments: {}".format(unused_args))

        # Execute pre-processing.
        if parsed_args.run_preprocessing:
            if len(self.preprocessingApplet.topLevelOperator) != 1:
                raise RuntimeError("Can't run preprocessing on a project with no images.")

            opPreprocessing = self.preprocessingApplet.topLevelOperator.getLane(0) # Carving has only one 'lane'

            # If user provided parameters, override the defaults.
            if parsed_args.preprocessing_sigma is not None:
                opPreprocessing.Sigma.setValue(parsed_args.preprocessing_sigma)

            if parsed_args.preprocessing_filter:
                filter_index = filter_indexes[parsed_args.preprocessing_filter]
                opPreprocessing.Filter.setValue(filter_index)

            logger.info("Running Preprocessing...")
            opPreprocessing.PreprocessedData[:].wait()
            logger.info("FINISHED Preprocessing...")

            logger.info("Saving project...")
            self._shell.projectManager.saveProject()
            logger.info("Done saving.")
Example #28

    def createDataSelectionApplet(self):
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right"
        return DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False,
            instructionText=data_instructions,
            show_axis_details=self.supports_anisotropic_data)
Example #29
    def _createInputAndConfigApplets(self):
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right"
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            instructionText=data_instructions)
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(self.ROLE_NAMES)
        self._applets.append(self.dataSelectionApplet)
Example #30
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, hintoverlayFile=None, pmapoverlayFile=None, *args, **kwargs):
        if hintoverlayFile is not None:
            assert isinstance(hintoverlayFile, str), "hintoverlayFile should be a string, not '%s'" % type(hintoverlayFile)
        if pmapoverlayFile is not None:
            assert isinstance(pmapoverlayFile, str), "pmapoverlayFile should be a string, not '%s'" % type(pmapoverlayFile)

        graph = Graph()
        
        super(CarvingWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self.workflow_cmdline_args = workflow_cmdline_args
        
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right"
        
        ## Create applets 
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet( self,
                                                        "Input Data",
                                                        "Input Data",
                                                        supportIlastik05Import=True,
                                                        batchDataGui=False,
                                                        instructionText=data_instructions,
                                                        max_lanes=1 )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( DATA_ROLES )
        
        self.preprocessingApplet = PreprocessingApplet(workflow=self,
                                           title = "Preprocessing",
                                           projectFileGroupName="preprocessing")
        
        self.carvingApplet = CarvingApplet(workflow=self,
                                           projectFileGroupName="carving",
                                           hintOverlayFile=hintoverlayFile,
                                           pmapOverlayFile=pmapoverlayFile)
        
        #self.carvingApplet.topLevelOperator.MST.connect(self.preprocessingApplet.topLevelOperator.PreprocessedData)
        
        # Expose to shell
        self._applets = []
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.preprocessingApplet)
        self._applets.append(self.carvingApplet)
Example #31
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(RemoteServerWorkflow, self).__init__(shell,
                                                   headless,
                                                   workflow_cmdline_args,
                                                   project_creation_args,
                                                   graph=graph,
                                                   *args,
                                                   **kwargs)

        self._applets = []

        # Parse workflow-specific command-line args
        # TODO allow command-line (does this make sense? probably not...)
        # parsed_creation_args, unused_args = parser.parse_known_args(
        #     project_creation_args)

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Remote Computation Plug-in",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.loginApplet = LoginApplet(self, "Log In")
        self.serverBrowserApplet = ServerBrowserApplet(self, "Remote Service")
        self.labelingSingleLaneApplet = LabelingSingleLaneApplet(
            self, "Labeling")
        self.sendToServApplet = SendToServApplet(self, "Request Service")

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Raw Data"])

        # Append applets
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.loginApplet)
        self._applets.append(self.serverBrowserApplet)
        self._applets.append(self.labelingSingleLaneApplet)
        self._applets.append(self.sendToServApplet)
Example #32
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_workflow, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()

        super(WsdtWorkflow, self).__init__( shell, headless, workflow_cmdline_args, project_creation_workflow, graph=graph, *args, **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data", "Input Data")

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( self.ROLE_NAMES )

        # -- Wsdt applet
        #
        self.wsdtApplet = WsdtApplet(self, "Watershed", "Wsdt Watershed")

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( self.EXPORT_NAMES )

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(self,
                                                           "Batch Processing",
                                                           self.dataSelectionApplet,
                                                           self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, self.ROLE_NAMES )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))
Example #33
    def _run_export_with_empty_batch_lane(self, role_input_paths, batch_lane_index, template_infos, progress_callback):
        """
        Configure the fresh batch lane with the given input files, and export the results.
        """
        assert role_input_paths[0], "At least one file must be provided for each dataset (the first role)."
        opDataSelectionBatchLaneView = self.dataSelectionApplet.topLevelOperator.getLane( batch_lane_index )

        # Apply new settings for each role
        for role_index, path_for_role in enumerate(role_input_paths):
            if not path_for_role:
                continue

            if template_infos[role_index]:
                info = copy.copy(template_infos[role_index])
            else:
                info = DatasetInfo()

            # Override the template settings with the current filepath.
            default_info = DataSelectionApplet.create_default_headless_dataset_info(path_for_role)
            info.filePath = default_info.filePath
            info.location = default_info.location
            info.nickname = default_info.nickname

            # Apply to the data selection operator
            opDataSelectionBatchLaneView.DatasetGroup[role_index].setValue(info)

        # Make sure nothing went wrong
        opDataExportBatchlaneView = self.dataExportApplet.topLevelOperator.getLane( batch_lane_index )
        assert opDataExportBatchlaneView.ImageToExport.ready()
        assert opDataExportBatchlaneView.ExportPath.ready()
        
        # New lanes were added.
        # Give the workflow a chance to restore anything that was unnecessarily invalidated (e.g. classifiers)
        self.workflow.handleNewLanesAdded()
        
        # Call customization hook
        self.dataExportApplet.prepare_lane_for_export(batch_lane_index)

        # Finally, run the export
        logger.info("Exporting to {}".format( opDataExportBatchlaneView.ExportPath.value ))
        opDataExportBatchlaneView.progressSignal.subscribe(progress_callback)
        opDataExportBatchlaneView.run_export()

        # Call customization hook
        self.dataExportApplet.post_process_lane_export(batch_lane_index)
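
A hedged sketch of how the method above might be driven (the real batch applet does this internally; `export_all`, `inputs_by_role`, and the lane index parameter are assumptions for illustration):

import sys

def export_all(batch_applet, inputs_by_role, batch_lane_index):
    """Hypothetical driver: one export per dataset, one path per role."""
    # inputs_by_role: one list per role, aligned by dataset index, e.g.
    #   [['raw1.h5', 'raw2.h5'],       # role 0: raw data
    #    [None,      'mask2.h5']]      # role 1: optional second role
    def print_progress(progress):
        sys.stdout.write("{} ".format(progress))
        sys.stdout.flush()

    template_infos = [None] * len(inputs_by_role)  # no template settings
    for role_input_paths in zip(*inputs_by_role):
        batch_applet._run_export_with_empty_batch_lane(
            role_input_paths, batch_lane_index, template_infos, print_progress)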
Example #34
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, hintoverlayFile=None, pmapoverlayFile=None, *args, **kwargs):
        if hintoverlayFile is not None:
            assert isinstance(hintoverlayFile, str), "hintoverlayFile should be a string, not '%s'" % type(hintoverlayFile)
        if pmapoverlayFile is not None:
            assert isinstance(pmapoverlayFile, str), "pmapoverlayFile should be a string, not '%s'" % type(pmapoverlayFile)

        graph = Graph()
        
        super(CarvingWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self.workflow_cmdline_args = workflow_cmdline_args
        
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right.\n\n"\
                            "Additionally, you may optionally add an 'Overlay' data volume if it helps you annotate. (It won't be used for any computation.)"
        
        ## Create applets 
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet( self,
                                                        "Input Data",
                                                        "Input Data",
                                                        supportIlastik05Import=True,
                                                        batchDataGui=False,
                                                        instructionText=data_instructions,
                                                        max_lanes=1 )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( DATA_ROLES )
        
        self.preprocessingApplet = PreprocessingApplet(workflow=self,
                                           title = "Preprocessing",
                                           projectFileGroupName="preprocessing")
        
        self.carvingApplet = CarvingApplet(workflow=self,
                                           projectFileGroupName="carving",
                                           hintOverlayFile=hintoverlayFile,
                                           pmapOverlayFile=pmapoverlayFile)
        
        #self.carvingApplet.topLevelOperator.MST.connect(self.preprocessingApplet.topLevelOperator.PreprocessedData)
        
        # Expose to shell
        self._applets = []
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.preprocessingApplet)
        self._applets.append(self.carvingApplet)
def append_lane(workflow, input_filepath, axisorder=None):
    # Sanity checks
    assert isinstance(workflow, PixelClassificationWorkflow)
    opPixelClassification = workflow.pcApplet.topLevelOperator
    assert opPixelClassification.Classifier.ready()

    # If the filepath is a globstring, convert the stack to h5
    input_filepath = DataSelectionApplet.convertStacksToH5( [input_filepath], TMP_DIR )[0]

    info = DatasetInfo()
    info.location = DatasetInfo.Location.FileSystem
    info.filePath = input_filepath

    comp = PathComponents(input_filepath)

    # Convert all (non-url) paths to absolute 
    # (otherwise they are relative to the project file, which probably isn't what the user meant)        
    if not isUrl(input_filepath):
        comp.externalPath = os.path.abspath(comp.externalPath)
        info.filePath = comp.totalPath()
    info.nickname = comp.filenameBase
    if axisorder:
        info.axistags = vigra.defaultAxistags(axisorder)

    logger.debug( "adding lane: {}".format( info ) )

    opDataSelection = workflow.dataSelectionApplet.topLevelOperator

    # Add a lane
    num_lanes = len( opDataSelection.DatasetGroup )+1
    logger.debug( "num_lanes: {}".format( num_lanes ) )
    opDataSelection.DatasetGroup.resize( num_lanes )
    
    # Configure it.
    role_index = 0 # raw data
    opDataSelection.DatasetGroup[-1][role_index].setValue( info )

    # Sanity check
    assert len(opPixelClassification.InputImages) == num_lanes
    
    return opPixelClassification
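
A hypothetical call site for the helper above (the `workflow` object would come from an already-loaded project; the path and axis order are illustrative):

def add_volume(workflow):
    """Hypothetical usage: append one raw-data lane, forcing a 5D axis order."""
    opPixelClassification = append_lane(workflow,
                                        "/data/new_volume.h5/volume",
                                        axisorder="txyzc")
    # One InputImages subslot per lane, including the lane just added.
    assert len(opPixelClassification.InputImages) > 0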
def append_lane(workflow, input_filepath, axisorder=None):
    """
    Add a lane to the project file for the given input file.

    If axisorder is given, override the default axisorder for
    the file and force the project to use the given one.
    
    Globstrings are supported, in which case the files are converted to HDF5 first.
    """
    # If the filepath is a globstring, convert the stack to h5
    input_filepath = DataSelectionApplet.convertStacksToH5( [input_filepath], tempfile.mkdtemp() )[0]

    info = DatasetInfo()
    info.location = DatasetInfo.Location.FileSystem
    info.filePath = input_filepath

    comp = PathComponents(input_filepath)

    # Convert all (non-url) paths to absolute 
    # (otherwise they are relative to the project file, which probably isn't what the user meant)        
    if not isUrl(input_filepath):
        comp.externalPath = os.path.abspath(comp.externalPath)
        info.filePath = comp.totalPath()
    info.nickname = comp.filenameBase
    if axisorder:
        info.axistags = vigra.defaultAxistags(axisorder)

    logger.debug( "adding lane: {}".format( info ) )

    opDataSelection = workflow.dataSelectionApplet.topLevelOperator

    # Add a lane
    num_lanes = len( opDataSelection.DatasetGroup )+1
    logger.debug( "num_lanes: {}".format( num_lanes ) )
    opDataSelection.DatasetGroup.resize( num_lanes )
    
    # Configure it.
    role_index = 0 # raw data
    opDataSelection.DatasetGroup[-1][role_index].setValue( info )
Example #37
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self._applets = []

        # Create applets 
        self.dataSelectionApplet = DataSelectionApplet(self, 
                                                       "Input Data", 
                                                       "Input Data", 
                                                       supportIlastik05Import=True, 
                                                       batchDataGui=False,
                                                       force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        role_names = ["Input Data"]
        opDataSelection.DatasetRoles.setValue( role_names )

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( ["Input"] )        

        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.dataExportApplet )

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        self._workflow_cmdline_args = workflow_cmdline_args
        self._data_input_args = None
        self._data_export_args = None
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._data_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, role_names )
            if unused_args:
                logger.warning("Unused command-line args: {}".format( unused_args ))
Example #38
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, appendBatchOperators=True, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super( PixelClassificationWorkflow, self ).__init__( shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs )
        self._applets = []
        self._workflow_cmdline_args = workflow_cmdline_args

        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--filter', help="pixel feature filter implementation.", choices=['Original', 'Refactored', 'Interpolated'], default='Original')
        parser.add_argument('--print-labels-by-slice', help="Print the number of labels for each Z-slice of each image.", action="store_true")
        parser.add_argument('--label-search-value', help="If provided, only this value is considered when using --print-labels-by-slice", default=0, type=int)
        parser.add_argument('--generate-random-labels', help="Add random labels to the project file.", action="store_true")
        parser.add_argument('--random-label-value', help="The label value to use injecting random labels", default=1, type=int)
        parser.add_argument('--random-label-count', help="The number of random labels to inject via --generate-random-labels", default=2000, type=int)
        parser.add_argument('--retrain', help="Re-train the classifier based on labels stored in project file, and re-save.", action="store_true")

        # Parse the creation args: These were saved to the project file when this project was first created.
        parsed_creation_args, unused_args = parser.parse_known_args(project_creation_args)
        self.filter_implementation = parsed_creation_args.filter
        
        # Parse the cmdline args for the current session.
        parsed_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        self.print_labels_by_slice = parsed_args.print_labels_by_slice
        self.label_search_value = parsed_args.label_search_value
        self.generate_random_labels = parsed_args.generate_random_labels
        self.random_label_value = parsed_args.random_label_value
        self.random_label_count = parsed_args.random_label_count
        self.retrain = parsed_args.retrain

        if parsed_args.filter and parsed_args.filter != parsed_creation_args.filter:
            logger.error("Ignoring new --filter setting.  Filter implementation cannot be changed after initial project creation.")
        
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right.\n\n"\
                            "Power users: Optionally use the 'Prediction Mask' tab to supply a binary image that tells ilastik where it should avoid computations you don't need."

        # Applets for training (interactive) workflow 
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet( self,
                                                        "Input Data",
                                                        "Input Data",
                                                        supportIlastik05Import=True,
                                                        batchDataGui=False,
                                                        instructionText=data_instructions )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        
        # see role constants, above
        opDataSelection.DatasetRoles.setValue( ['Raw Data', 'Prediction Mask'] )

        self.featureSelectionApplet = FeatureSelectionApplet(self, "Feature Selection", "FeatureSelections", self.filter_implementation)

        self.pcApplet = PixelClassificationApplet( self, "PixelClassification" )
        opClassify = self.pcApplet.topLevelOperator

        self.dataExportApplet = PixelClassificationDataExportApplet(self, "Prediction Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.PmapColors.connect( opClassify.PmapColors )
        opDataExport.LabelNames.connect( opClassify.LabelNames )
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( self.EXPORT_NAMES )        

        # Expose for shell
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.pcApplet)
        self._applets.append(self.dataExportApplet)

        self._batch_input_args = None
        self._batch_export_args = None

        self.batchInputApplet = None
        self.batchResultsApplet = None
        if appendBatchOperators:
            # Create applets for batch workflow
            self.batchInputApplet = DataSelectionApplet(self, "Batch Prediction Input Selections", "Batch Inputs", supportIlastik05Import=False, batchDataGui=True)
            self.batchResultsApplet = PixelClassificationDataExportApplet(self, "Batch Prediction Output Locations", isBatch=True)
    
            # Expose in shell        
            self._applets.append(self.batchInputApplet)
            self._applets.append(self.batchResultsApplet)
    
            # Connect batch workflow (NOT lane-based)
            self._initBatchWorkflow()

            if unused_args:
                # We parse the export setting args first.  All remaining args are considered input files by the input applet.
                self._batch_export_args, unused_args = self.batchResultsApplet.parse_known_cmdline_args( unused_args )
                self._batch_input_args, unused_args = self.batchInputApplet.parse_known_cmdline_args( unused_args )
    
        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))
Example #39
class PixelClassificationWorkflow(Workflow):
    
    workflowName = "Pixel Classification"
    workflowDescription = "This is obviously self-explanatory."
    defaultAppletIndex = 1 # show DataSelection by default
    
    DATA_ROLE_RAW = 0
    DATA_ROLE_PREDICTION_MASK = 1
    
    EXPORT_NAMES = ['Probabilities', 'Simple Segmentation', 'Uncertainty', 'Features']
    
    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, appendBatchOperators=True, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super( PixelClassificationWorkflow, self ).__init__( shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs )
        self._applets = []
        self._workflow_cmdline_args = workflow_cmdline_args

        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--filter', help="pixel feature filter implementation.", choices=['Original', 'Refactored', 'Interpolated'], default='Original')
        parser.add_argument('--print-labels-by-slice', help="Print the number of labels for each Z-slice of each image.", action="store_true")
        parser.add_argument('--label-search-value', help="If provided, only this value is considered when using --print-labels-by-slice", default=0, type=int)
        parser.add_argument('--generate-random-labels', help="Add random labels to the project file.", action="store_true")
        parser.add_argument('--random-label-value', help="The label value to use injecting random labels", default=1, type=int)
        parser.add_argument('--random-label-count', help="The number of random labels to inject via --generate-random-labels", default=2000, type=int)
        parser.add_argument('--retrain', help="Re-train the classifier based on labels stored in project file, and re-save.", action="store_true")

        # Parse the creation args: These were saved to the project file when this project was first created.
        parsed_creation_args, unused_args = parser.parse_known_args(project_creation_args)
        self.filter_implementation = parsed_creation_args.filter
        
        # Parse the cmdline args for the current session.
        parsed_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        self.print_labels_by_slice = parsed_args.print_labels_by_slice
        self.label_search_value = parsed_args.label_search_value
        self.generate_random_labels = parsed_args.generate_random_labels
        self.random_label_value = parsed_args.random_label_value
        self.random_label_count = parsed_args.random_label_count
        self.retrain = parsed_args.retrain

        if parsed_args.filter and parsed_args.filter != parsed_creation_args.filter:
            logger.error("Ignoring new --filter setting.  Filter implementation cannot be changed after initial project creation.")
        
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right.\n\n"\
                            "Power users: Optionally use the 'Prediction Mask' tab to supply a binary image that tells ilastik where it should avoid computations you don't need."

        # Applets for training (interactive) workflow 
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet( self,
                                                        "Input Data",
                                                        "Input Data",
                                                        supportIlastik05Import=True,
                                                        batchDataGui=False,
                                                        instructionText=data_instructions )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        
        # see role constants, above
        opDataSelection.DatasetRoles.setValue( ['Raw Data', 'Prediction Mask'] )

        self.featureSelectionApplet = FeatureSelectionApplet(self, "Feature Selection", "FeatureSelections", self.filter_implementation)

        self.pcApplet = PixelClassificationApplet( self, "PixelClassification" )
        opClassify = self.pcApplet.topLevelOperator

        self.dataExportApplet = PixelClassificationDataExportApplet(self, "Prediction Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.PmapColors.connect( opClassify.PmapColors )
        opDataExport.LabelNames.connect( opClassify.LabelNames )
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( self.EXPORT_NAMES )        

        # Expose for shell
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.pcApplet)
        self._applets.append(self.dataExportApplet)

        self._batch_input_args = None
        self._batch_export_args = None

        self.batchInputApplet = None
        self.batchResultsApplet = None
        if appendBatchOperators:
            # Create applets for batch workflow
            self.batchInputApplet = DataSelectionApplet(self, "Batch Prediction Input Selections", "Batch Inputs", supportIlastik05Import=False, batchDataGui=True)
            self.batchResultsApplet = PixelClassificationDataExportApplet(self, "Batch Prediction Output Locations", isBatch=True)
    
            # Expose in shell        
            self._applets.append(self.batchInputApplet)
            self._applets.append(self.batchResultsApplet)
    
            # Connect batch workflow (NOT lane-based)
            self._initBatchWorkflow()

            if unused_args:
                # We parse the export setting args first.  All remaining args are considered input files by the input applet.
                self._batch_export_args, unused_args = self.batchResultsApplet.parse_known_cmdline_args( unused_args )
                self._batch_input_args, unused_args = self.batchInputApplet.parse_known_cmdline_args( unused_args )
    
        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))

    def connectLane(self, laneIndex):
        # Get a handle to each operator
        opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opTrainingFeatures = self.featureSelectionApplet.topLevelOperator.getLane(laneIndex)
        opClassify = self.pcApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)
        
        # Input Image -> Feature Op
        #         and -> Classification Op (for display)
        opTrainingFeatures.InputImage.connect( opData.Image )
        opClassify.InputImages.connect( opData.Image )
        
        if ilastik_config.getboolean('ilastik', 'debug'):
            opClassify.PredictionMasks.connect( opData.ImageGroup[self.DATA_ROLE_PREDICTION_MASK] )
        
        # Feature Images -> Classification Op (for training, prediction)
        opClassify.FeatureImages.connect( opTrainingFeatures.OutputImage )
        opClassify.CachedFeatureImages.connect( opTrainingFeatures.CachedOutputImage )
        
        # Training flags -> Classification Op (for GUI restrictions)
        opClassify.LabelsAllowedFlags.connect( opData.AllowLabels )

        # Data Export connections
        opDataExport.RawData.connect( opData.ImageGroup[self.DATA_ROLE_RAW] )
        opDataExport.RawDatasetInfo.connect( opData.DatasetGroup[self.DATA_ROLE_RAW] )
        opDataExport.ConstraintDataset.connect( opData.ImageGroup[self.DATA_ROLE_RAW] )
        opDataExport.Inputs.resize( len(self.EXPORT_NAMES) )
        opDataExport.Inputs[0].connect( opClassify.HeadlessPredictionProbabilities )
        opDataExport.Inputs[1].connect( opClassify.SimpleSegmentation )
        opDataExport.Inputs[2].connect( opClassify.HeadlessUncertaintyEstimate )
        opDataExport.Inputs[3].connect( opClassify.FeatureImages )
        for slot in opDataExport.Inputs:
            assert slot.partner is not None
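
The four explicit connections above pair `opDataExport.Inputs` with sources in `EXPORT_NAMES` order. A hypothetical loop restatement of the same wiring (same slots as above, just zipped; not the workflow's actual code):

def connect_export_inputs(opDataExport, opClassify):
    """Hypothetical loop form of the four explicit connections above."""
    export_sources = [opClassify.HeadlessPredictionProbabilities,  # 'Probabilities'
                      opClassify.SimpleSegmentation,               # 'Simple Segmentation'
                      opClassify.HeadlessUncertaintyEstimate,      # 'Uncertainty'
                      opClassify.FeatureImages]                    # 'Features'
    for export_slot, source_slot in zip(opDataExport.Inputs, export_sources):
        export_slot.connect(source_slot)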

    def _initBatchWorkflow(self):
        """
        Connect the batch-mode top-level operators to the training workflow and to each other.
        """
        # Access applet operators from the training workflow
        opTrainingDataSelection = self.dataSelectionApplet.topLevelOperator
        opTrainingFeatures = self.featureSelectionApplet.topLevelOperator
        opClassify = self.pcApplet.topLevelOperator
        
        # Access the batch operators
        opBatchInputs = self.batchInputApplet.topLevelOperator
        opBatchResults = self.batchResultsApplet.topLevelOperator
        
        opBatchInputs.DatasetRoles.connect( opTrainingDataSelection.DatasetRoles )
        
        opSelectFirstLane = OperatorWrapper( OpSelectSubslot, parent=self )
        opSelectFirstLane.Inputs.connect( opTrainingDataSelection.ImageGroup )
        opSelectFirstLane.SubslotIndex.setValue(0)
        
        opSelectFirstRole = OpSelectSubslot( parent=self )
        opSelectFirstRole.Inputs.connect( opSelectFirstLane.Output )
        opSelectFirstRole.SubslotIndex.setValue(self.DATA_ROLE_RAW)
        
        opBatchResults.ConstraintDataset.connect( opSelectFirstRole.Output )
        
        ## Create additional batch workflow operators
        opBatchFeatures = OperatorWrapper( OpFeatureSelectionNoCache, operator_kwargs={'filter_implementation': self.filter_implementation}, parent=self, promotedSlotNames=['InputImage'] )
        opBatchPredictionPipeline = OperatorWrapper( OpPredictionPipelineNoCache, parent=self )
        
        ## Connect Operators ##
        opTranspose = OpTransposeSlots( parent=self )
        opTranspose.OutputLength.setValue(2) # There are 2 roles
        opTranspose.Inputs.connect( opBatchInputs.DatasetGroup )
        opTranspose.name = "batchTransposeInputs"
        
        # Provide dataset paths from data selection applet to the batch export applet
        opBatchResults.RawDatasetInfo.connect( opTranspose.Outputs[self.DATA_ROLE_RAW] )
        opBatchResults.WorkingDirectory.connect( opBatchInputs.WorkingDirectory )
        
        # Connect (clone) the feature operator inputs from 
        #  the interactive workflow's features operator (which gets them from the GUI)
        opBatchFeatures.Scales.connect( opTrainingFeatures.Scales )
        opBatchFeatures.FeatureIds.connect( opTrainingFeatures.FeatureIds )
        opBatchFeatures.SelectionMatrix.connect( opTrainingFeatures.SelectionMatrix )
        
        # Classifier and NumClasses are provided by the interactive workflow
        opBatchPredictionPipeline.Classifier.connect( opClassify.Classifier )
        opBatchPredictionPipeline.NumClasses.connect( opClassify.NumClasses )
        
        # Provide these for the gui
        opBatchResults.RawData.connect( opBatchInputs.Image )
        opBatchResults.PmapColors.connect( opClassify.PmapColors )
        opBatchResults.LabelNames.connect( opClassify.LabelNames )
        
        # Connect Image pathway:
        # Input Image -> Features Op -> Prediction Op -> Export
        opBatchFeatures.InputImage.connect( opBatchInputs.Image )
        opBatchPredictionPipeline.PredictionMask.connect( opBatchInputs.Image1 )
        opBatchPredictionPipeline.FeatureImages.connect( opBatchFeatures.OutputImage )

        opBatchResults.SelectionNames.setValue( self.EXPORT_NAMES )        
        # opBatchResults.Inputs is indexed by [lane][selection],
        # Use OpTranspose to allow connection.
        opTransposeBatchInputs = OpTransposeSlots( parent=self )
        opTransposeBatchInputs.name = "opTransposeBatchInputs"
        opTransposeBatchInputs.OutputLength.setValue(0)
        opTransposeBatchInputs.Inputs.resize( len(self.EXPORT_NAMES) )
        opTransposeBatchInputs.Inputs[0].connect( opBatchPredictionPipeline.HeadlessPredictionProbabilities ) # selection 0
        opTransposeBatchInputs.Inputs[1].connect( opBatchPredictionPipeline.SimpleSegmentation ) # selection 1
        opTransposeBatchInputs.Inputs[2].connect( opBatchPredictionPipeline.HeadlessUncertaintyEstimate ) # selection 2
        opTransposeBatchInputs.Inputs[3].connect( opBatchPredictionPipeline.FeatureImages ) # selection 3
        for slot in opTransposeBatchInputs.Inputs:
            assert slot.partner is not None
        
        # Now opTransposeBatchInputs.Outputs is level-2 indexed by [lane][selection]
        opBatchResults.Inputs.connect( opTransposeBatchInputs.Outputs )

        # We don't actually need the cached path in the batch pipeline.
        # Just connect the uncached features here to satisfy the operator.
        #opBatchPredictionPipeline.CachedFeatureImages.connect( opBatchFeatures.OutputImage )

        self.opBatchFeatures = opBatchFeatures
        self.opBatchPredictionPipeline = opBatchPredictionPipeline
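
The `OpTransposeSlots` steps above exist because the export applet wants its inputs indexed `[lane][selection]`, while the batch pipeline produces them `[selection][lane]`. A plain-Python sketch of the same reindexing, with nested lists standing in for multi-level slots:

# Outputs as produced: one list per selection, one entry per lane.
by_selection = [
    ['probs_lane0', 'probs_lane1'],   # selection 0: probabilities
    ['seg_lane0',   'seg_lane1'],     # selection 1: segmentation
]

# What the export applet wants: one list per lane, one entry per selection.
by_lane = [list(group) for group in zip(*by_selection)]
assert by_lane[1][0] == 'probs_lane1'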

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested`
        """
        # If no data, nothing else is ready.
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0 and not self.dataSelectionApplet.busy

        opFeatureSelection = self.featureSelectionApplet.topLevelOperator
        featureOutput = opFeatureSelection.OutputImage
        features_ready = input_ready and \
                         len(featureOutput) > 0 and  \
                         featureOutput[0].ready() and \
                         (TinyVector(featureOutput[0].meta.shape) > 0).all()

        opDataExport = self.dataExportApplet.topLevelOperator
        opPixelClassification = self.pcApplet.topLevelOperator

        invalid_classifier = opPixelClassification.classifier_cache.fixAtCurrent.value and \
                             opPixelClassification.classifier_cache.Output.ready() and\
                             opPixelClassification.classifier_cache.Output.value is None

        predictions_ready = features_ready and \
                            not invalid_classifier and \
                            len(opDataExport.Inputs) > 0 and \
                            opDataExport.Inputs[0][0].ready() and \
                            (TinyVector(opDataExport.Inputs[0][0].meta.shape) > 0).all()

        # Problems can occur if the features or input data are changed during live update mode.
        # Don't let the user do that.
        live_update_active = not opPixelClassification.FreezePredictions.value
        
        self._shell.setAppletEnabled(self.dataSelectionApplet, not live_update_active)
        self._shell.setAppletEnabled(self.featureSelectionApplet, input_ready and not live_update_active)
        self._shell.setAppletEnabled(self.pcApplet, features_ready)
        self._shell.setAppletEnabled(self.dataExportApplet, predictions_ready)

        if self.batchInputApplet is not None:
            # Training workflow must be fully configured before batch can be used
            self._shell.setAppletEnabled(self.batchInputApplet, predictions_ready)
    
            opBatchDataSelection = self.batchInputApplet.topLevelOperator
            batch_input_ready = predictions_ready and \
                                len(opBatchDataSelection.ImageGroup) > 0
            self._shell.setAppletEnabled(self.batchResultsApplet, batch_input_ready)
            
        # Lastly, check for certain "busy" conditions, during which we 
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.featureSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        self._shell.enableProjectChanges( not busy )

    def getHeadlessOutputSlot(self, slotId):
        # "Regular" (i.e. with the images that the user selected as input data)
        if slotId == "Predictions":
            return self.pcApplet.topLevelOperator.HeadlessPredictionProbabilities
        elif slotId == "PredictionsUint8":
            return self.pcApplet.topLevelOperator.HeadlessUint8PredictionProbabilities
        # "Batch" (i.e. with the images that the user selected as batch inputs).
        elif slotId == "BatchPredictions":
            return self.opBatchPredictionPipeline.HeadlessPredictionProbabilities
        if slotId == "BatchPredictionsUint8":
            return self.opBatchPredictionPipeline.HeadlessUint8PredictionProbabilities
        
        raise Exception("Unknown headless output slot")


    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow for batch mode and export all results.
        (This workflow's headless mode supports only batch mode for now.)
        """
        if self.generate_random_labels:
            self._generate_random_labels(self.random_label_count, self.random_label_value)
            logger.info("Saving project...")
            self._shell.projectManager.saveProject()
            logger.info("Done.")
        
        if self.print_labels_by_slice:
            self._print_labels_by_slice( self.label_search_value )

        # Configure the batch data selection operator.
        if self._batch_input_args and (self._batch_input_args.input_files or self._batch_input_args.raw_data):
            self.batchInputApplet.configure_operator_with_parsed_args( self._batch_input_args )
        
        # Configure the data export operator.
        if self._batch_export_args:
            self.batchResultsApplet.configure_operator_with_parsed_args( self._batch_export_args )

        if self._batch_input_args and self.pcApplet.topLevelOperator.classifier_cache._dirty:
            logger.warn("Your project file has no classifier.  A new classifier will be trained for this run.")

        if self._headless:
            # In headless mode, let's see the messages from the training operator.
            logging.getLogger("lazyflow.operators.classifierOperators").setLevel(logging.DEBUG)
        
        if self.retrain:
            # Cause the classifier to be dirty so it is forced to retrain.
            # (useful if the stored labels were changed outside ilastik)
            self.pcApplet.topLevelOperator.opTrain.ClassifierFactory.setDirty()
            
            # Request the classifier, which forces training
            self.pcApplet.topLevelOperator.FreezePredictions.setValue(False)
            _ = self.pcApplet.topLevelOperator.Classifier.value

            # store new classifier to project file
            projectManager.saveProject(force_all_save=False)

        if self._headless and self._batch_input_args and self._batch_export_args:
            # Make sure we're using the up-to-date classifier.
            self.pcApplet.topLevelOperator.FreezePredictions.setValue(False)
        
            # Now run the batch export and report progress....
            opBatchDataExport = self.batchResultsApplet.topLevelOperator
            for i, opExportDataLaneView in enumerate(opBatchDataExport):
                logger.info( "Exporting result {} to {}".format(i, opExportDataLaneView.ExportPath.value) )
    
                sys.stdout.write( "Result {}/{} Progress: ".format( i, len( opBatchDataExport ) ) )
                sys.stdout.flush()
                def print_progress( progress ):
                    sys.stdout.write( "{} ".format( progress ) )
                    sys.stdout.flush()
    
                # If the operator provides a progress signal, use it.
                slotProgressSignal = opExportDataLaneView.progressSignal
                slotProgressSignal.subscribe( print_progress )
                opExportDataLaneView.run_export()
                
                # Finished.
                sys.stdout.write("\n")


    def _print_labels_by_slice(self, search_value):
        """
        Iterate over each label image in the project and print the number of labels present on each Z-slice of the image.
        (This is a special feature requested by the FlyEM proofreaders.)
        """
        opTopLevelClassify = self.pcApplet.topLevelOperator
        project_label_count = 0
        for image_index, label_slot in enumerate(opTopLevelClassify.LabelImages):
            tagged_shape = label_slot.meta.getTaggedShape()
            if 'z' not in tagged_shape:
                logger.error("Can't print label counts by Z-slices.  Image #{} has no Z-dimension.".format(image_index))
            else:
                logger.info("Label counts in Z-slices of Image #{}:".format( image_index ))
                slicing = [slice(None)] * len(tagged_shape)
                blank_slices = []
                image_label_count = 0
                for z in range(tagged_shape['z']):
                    slicing[list(tagged_shape.keys()).index('z')] = slice(z, z+1)
                    label_slice = label_slot[slicing].wait()
                    if search_value:                        
                        count = (label_slice == search_value).sum()
                    else:
                        count = (label_slice != 0).sum()
                    if count > 0:
                        logger.info("Z={}: {}".format( z, count ))
                        image_label_count += count
                    else:
                        blank_slices.append( z )
                project_label_count += image_label_count
                if len(blank_slices) > 20:
                    # Don't list the blank slices if there were a lot of them.
                    logger.info("Image #{} has {} blank slices.".format( image_index, len(blank_slices) ))
                elif len(blank_slices) > 0:
                    logger.info( "Image #{} has {} blank slices: {}".format( image_index, len(blank_slices), blank_slices ) )
                else:
                    logger.info( "Image #{} has no blank slices.".format( image_index ) )
                logger.info( "Total labels for Image #{}: {}".format( image_index, image_label_count ) )
        logger.info( "Total labels for project: {}".format( project_label_count ) )

    
    def _generate_random_labels(self, labels_per_image, label_value):
        """
        Inject random labels into the project file.
        (This is a special feature requested by the FlyEM proofreaders.)
        """
        logger.info( "Injecting {} labels of value {} into all images.".format( labels_per_image, label_value ) )
        opTopLevelClassify = self.pcApplet.topLevelOperator
        
        label_names = copy.copy(opTopLevelClassify.LabelNames.value)
        while len(label_names) < label_value:
            label_names.append( "Label {}".format( len(label_names)+1 ) )
        
        opTopLevelClassify.LabelNames.setValue( label_names )
        
        for image_index in range(len(opTopLevelClassify.LabelImages)):
            logger.info( "Injecting labels into image #{}".format( image_index ) )
            # For reproducibility of label generation
            SEED = 1
            numpy.random.seed([SEED, image_index])
        
            label_input_slot = opTopLevelClassify.LabelInputs[image_index]
            label_output_slot = opTopLevelClassify.LabelImages[image_index]
        
            shape = label_output_slot.meta.shape
            random_labels = numpy.zeros( shape=shape, dtype=numpy.uint8 )
            num_pixels = len(random_labels.flat)
            current_progress = -1
            for sample_index in range(labels_per_image):
                flat_index = numpy.random.randint(0,num_pixels)
                # Don't overwrite existing labels
                # Keep looking until we find a blank pixel
                while random_labels.flat[flat_index]:
                    flat_index = numpy.random.randint(0,num_pixels)
                random_labels.flat[flat_index] = label_value

                # Print progress every 10%
                progress = float(sample_index) / labels_per_image
                progress = 10 * (int(100*progress)//10)
                if progress != current_progress:
                    current_progress = progress
                    sys.stdout.write( "{}% ".format( current_progress ) )
                    sys.stdout.flush()

            sys.stdout.write( "100%\n" )
            # Write into the operator
            label_input_slot[fullSlicing(shape)] = random_labels
        
        logger.info( "Done injecting labels" )
Example #40
class PixelClassificationWorkflow(Workflow):
    
    workflowName = "Pixel Classification"
    workflowDescription = "This is obviously self-explanoratory."
    defaultAppletIndex = 1 # show DataSelection by default
    
    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def __init__(self, shell, headless, workflow_cmdline_args, appendBatchOperators=True, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super( PixelClassificationWorkflow, self ).__init__( shell, headless, graph=graph, *args, **kwargs )
        self._applets = []
        self._workflow_cmdline_args = workflow_cmdline_args

        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right"

        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--filter', help="pixel feature filter implementation.", choices=['Original', 'Refactored', 'Interpolated'], default='Original')
        parsed_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        self.filter_implementation = parsed_args.filter
        
        # Applets for training (interactive) workflow 
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet( self,
                                                        "Input Data",
                                                        "Input Data",
                                                        supportIlastik05Import=True,
                                                        batchDataGui=False,
                                                        instructionText=data_instructions )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( ['Raw Data'] )

        self.featureSelectionApplet = FeatureSelectionApplet(self, "Feature Selection", "FeatureSelections", self.filter_implementation)

        self.pcApplet = PixelClassificationApplet(self, "PixelClassification")
        opClassify = self.pcApplet.topLevelOperator

        self.dataExportApplet = PixelClassificationDataExportApplet(self, "Prediction Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.PmapColors.connect( opClassify.PmapColors )
        opDataExport.LabelNames.connect( opClassify.LabelNames )
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )

        # Expose for shell
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.pcApplet)
        self._applets.append(self.dataExportApplet)

        self._batch_input_args = None
        self._batch_export_args = None

        self.batchInputApplet = None
        self.batchResultsApplet = None
        if appendBatchOperators:
            # Create applets for batch workflow
            self.batchInputApplet = DataSelectionApplet(self, "Batch Prediction Input Selections", "Batch Inputs", supportIlastik05Import=False, batchDataGui=True)
            self.batchResultsApplet = PixelClassificationDataExportApplet(self, "Batch Prediction Output Locations", isBatch=True)
    
            # Expose in shell        
            self._applets.append(self.batchInputApplet)
            self._applets.append(self.batchResultsApplet)
    
            # Connect batch workflow (NOT lane-based)
            self._initBatchWorkflow()

            if unused_args:
                # We parse the export setting args first.  All remaining args are considered input files by the input applet.
                self._batch_export_args, unused_args = self.batchResultsApplet.parse_known_cmdline_args( unused_args )
                self._batch_input_args, unused_args = self.batchInputApplet.parse_known_cmdline_args( unused_args )
    
        if unused_args:
            logger.warn("Unused command-line args: {}".format( unused_args ))

    def connectLane(self, laneIndex):
        # Get a handle to each operator
        opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opTrainingFeatures = self.featureSelectionApplet.topLevelOperator.getLane(laneIndex)
        opClassify = self.pcApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)
        
        # Input Image -> Feature Op
        #         and -> Classification Op (for display)
        opTrainingFeatures.InputImage.connect( opData.Image )
        opClassify.InputImages.connect( opData.Image )
        
        # Feature Images -> Classification Op (for training, prediction)
        opClassify.FeatureImages.connect( opTrainingFeatures.OutputImage )
        opClassify.CachedFeatureImages.connect( opTrainingFeatures.CachedOutputImage )
        
        # Training flags -> Classification Op (for GUI restrictions)
        opClassify.LabelsAllowedFlags.connect( opData.AllowLabels )

        # Data Export connections
        opDataExport.RawData.connect( opData.ImageGroup[0] )
        opDataExport.Input.connect( opClassify.HeadlessPredictionProbabilities )
        opDataExport.RawDatasetInfo.connect( opData.DatasetGroup[0] )
        opDataExport.ConstraintDataset.connect( opData.ImageGroup[0] )

    def _initBatchWorkflow(self):
        """
        Connect the batch-mode top-level operators to the training workflow and to each other.
        """
        # Access applet operators from the training workflow
        opTrainingDataSelection = self.dataSelectionApplet.topLevelOperator
        opTrainingFeatures = self.featureSelectionApplet.topLevelOperator
        opClassify = self.pcApplet.topLevelOperator
        
        # Access the batch operators
        opBatchInputs = self.batchInputApplet.topLevelOperator
        opBatchResults = self.batchResultsApplet.topLevelOperator
        
        opBatchInputs.DatasetRoles.connect( opTrainingDataSelection.DatasetRoles )
        
        opSelectFirstLane = OperatorWrapper( OpSelectSubslot, parent=self )
        opSelectFirstLane.Inputs.connect( opTrainingDataSelection.ImageGroup )
        opSelectFirstLane.SubslotIndex.setValue(0)
        
        opSelectFirstRole = OpSelectSubslot( parent=self )
        opSelectFirstRole.Inputs.connect( opSelectFirstLane.Output )
        opSelectFirstRole.SubslotIndex.setValue(0)
        
        opBatchResults.ConstraintDataset.connect( opSelectFirstRole.Output )
        
        ## Create additional batch workflow operators
        opBatchFeatures = OperatorWrapper( OpFeatureSelectionNoCache, operator_kwargs={'filter_implementation': self.filter_implementation}, parent=self, promotedSlotNames=['InputImage'] )
        opBatchPredictionPipeline = OperatorWrapper( OpPredictionPipelineNoCache, parent=self )
        
        ## Connect Operators ##
        opTranspose = OpTransposeSlots( parent=self )
        opTranspose.OutputLength.setValue(1)
        opTranspose.Inputs.connect( opBatchInputs.DatasetGroup )
        
        # Provide dataset paths from data selection applet to the batch export applet
        opBatchResults.RawDatasetInfo.connect( opTranspose.Outputs[0] )
        opBatchResults.WorkingDirectory.connect( opBatchInputs.WorkingDirectory )
        
        # Connect (clone) the feature operator inputs from 
        #  the interactive workflow's features operator (which gets them from the GUI)
        opBatchFeatures.Scales.connect( opTrainingFeatures.Scales )
        opBatchFeatures.FeatureIds.connect( opTrainingFeatures.FeatureIds )
        opBatchFeatures.SelectionMatrix.connect( opTrainingFeatures.SelectionMatrix )
        
        # Classifier and NumClasses are provided by the interactive workflow
        opBatchPredictionPipeline.Classifier.connect( opClassify.Classifier )
        opBatchPredictionPipeline.FreezePredictions.setValue( False )
        opBatchPredictionPipeline.NumClasses.connect( opClassify.NumClasses )
        
        # Provide these for the gui
        opBatchResults.RawData.connect( opBatchInputs.Image )
        opBatchResults.PmapColors.connect( opClassify.PmapColors )
        opBatchResults.LabelNames.connect( opClassify.LabelNames )
        
        # Connect Image pathway:
        # Input Image -> Features Op -> Prediction Op -> Export
        opBatchFeatures.InputImage.connect( opBatchInputs.Image )
        opBatchPredictionPipeline.FeatureImages.connect( opBatchFeatures.OutputImage )
        opBatchResults.Input.connect( opBatchPredictionPipeline.HeadlessPredictionProbabilities )

        # We don't actually need the cached path in the batch pipeline.
        # Just connect the uncached features here to satisfy the operator.
        #opBatchPredictionPipeline.CachedFeatureImages.connect( opBatchFeatures.OutputImage )

        self.opBatchPredictionPipeline = opBatchPredictionPipeline

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested`
        """
        # If no data, nothing else is ready.
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0 and not self.dataSelectionApplet.busy

        opFeatureSelection = self.featureSelectionApplet.topLevelOperator
        featureOutput = opFeatureSelection.OutputImage
        features_ready = input_ready and \
                         len(featureOutput) > 0 and  \
                         featureOutput[0].ready() and \
                         (TinyVector(featureOutput[0].meta.shape) > 0).all()

        opDataExport = self.dataExportApplet.topLevelOperator
        predictions_ready = features_ready and \
                            len(opDataExport.Input) > 0 and \
                            opDataExport.Input[0].ready() and \
                            (TinyVector(opDataExport.Input[0].meta.shape) > 0).all()

        # Problems can occur if the features or input data are changed during live update mode.
        # Don't let the user do that.
        opPixelClassification = self.pcApplet.topLevelOperator
        live_update_active = not opPixelClassification.FreezePredictions.value

        self._shell.setAppletEnabled(self.dataSelectionApplet, not live_update_active)
        self._shell.setAppletEnabled(self.featureSelectionApplet, input_ready and not live_update_active)
        self._shell.setAppletEnabled(self.pcApplet, features_ready)
        self._shell.setAppletEnabled(self.dataExportApplet, predictions_ready)

        if self.batchInputApplet is not None:
            # Training workflow must be fully configured before batch can be used
            self._shell.setAppletEnabled(self.batchInputApplet, predictions_ready)
    
            opBatchDataSelection = self.batchInputApplet.topLevelOperator
            batch_input_ready = predictions_ready and \
                                len(opBatchDataSelection.ImageGroup) > 0
            self._shell.setAppletEnabled(self.batchResultsApplet, batch_input_ready)
            
        # Lastly, check for certain "busy" conditions, during which we 
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.featureSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        self._shell.enableProjectChanges( not busy )

    def getHeadlessOutputSlot(self, slotId):
        # "Regular" (i.e. with the images that the user selected as input data)
        if slotId == "Predictions":
            return self.pcApplet.topLevelOperator.HeadlessPredictionProbabilities
        elif slotId == "PredictionsUint8":
            return self.pcApplet.topLevelOperator.HeadlessUint8PredictionProbabilities
        # "Batch" (i.e. with the images that the user selected as batch inputs).
        elif slotId == "BatchPredictions":
            return self.opBatchPredictionPipeline.HeadlessPredictionProbabilities
        if slotId == "BatchPredictionsUint8":
            return self.opBatchPredictionPipeline.HeadlessUint8PredictionProbabilities
        
        raise Exception("Unknown headless output slot")
    
    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow for batch mode and export all results.
        (This workflow's headless mode supports only batch mode for now.)
        """
        # Configure the batch data selection operator.
        if self._batch_input_args and self._batch_input_args.input_files: 
            self.batchInputApplet.configure_operator_with_parsed_args( self._batch_input_args )
        
        # Configure the data export operator.
        if self._batch_export_args:
            self.batchResultsApplet.configure_operator_with_parsed_args( self._batch_export_args )

        if self._headless and self._batch_input_args and self._batch_export_args:
            
            # Make sure we're using the up-to-date classifier.
            self.pcApplet.topLevelOperator.FreezePredictions.setValue(False)
        
            # Now run the batch export and report progress....
            opBatchDataExport = self.batchResultsApplet.topLevelOperator
            for i, opExportDataLaneView in enumerate(opBatchDataExport):
                print "Exporting result {} to {}".format(i, opExportDataLaneView.ExportPath.value)
    
                sys.stdout.write( "Result {}/{} Progress: ".format( i, len( opBatchDataExport ) ) )
                def print_progress( progress ):
                    sys.stdout.write( "{} ".format( progress ) )
    
                # If the operator provides a progress signal, use it.
                slotProgressSignal = opExportDataLaneView.progressSignal
                slotProgressSignal.subscribe( print_progress )
                opExportDataLaneView.run_export()
                
                # Finished.
                sys.stdout.write("\n")
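
The `progressSignal.subscribe(...)` hookup above is a plain observer pattern. A tiny standalone sketch of the same idea (the `Signal` class is an assumption, standing in for lazyflow's signal object):

import sys

class Signal:
    """Minimal stand-in for lazyflow's progress signal (an assumption)."""
    def __init__(self):
        self._subscribers = []
    def subscribe(self, fn):
        self._subscribers.append(fn)
    def emit(self, value):
        for fn in self._subscribers:
            fn(value)

progress = Signal()
progress.subscribe(lambda p: sys.stdout.write("{} ".format(p)))
for p in (0, 50, 100):
    progress.emit(p)   # writes: 0 50 100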
Example #41
    def __init__(self, shell, headless, workflow_cmdline_args, appendBatchOperators=True, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super( PixelClassificationWorkflow, self ).__init__( shell, headless, graph=graph, *args, **kwargs )
        self._applets = []
        self._workflow_cmdline_args = workflow_cmdline_args

        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right"

        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--filter', help="pixel feature filter implementation.", choices=['Original', 'Refactored', 'Interpolated'], default='Original')
        parsed_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        self.filter_implementation = parsed_args.filter
        
        # Applets for training (interactive) workflow 
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet( self,
                                                        "Input Data",
                                                        "Input Data",
                                                        supportIlastik05Import=True,
                                                        batchDataGui=False,
                                                        instructionText=data_instructions )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( ['Raw Data'] )

        self.featureSelectionApplet = FeatureSelectionApplet(self, "Feature Selection", "FeatureSelections", self.filter_implementation)

        self.pcApplet = PixelClassificationApplet(self, "PixelClassification")
        opClassify = self.pcApplet.topLevelOperator

        self.dataExportApplet = PixelClassificationDataExportApplet(self, "Prediction Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.PmapColors.connect( opClassify.PmapColors )
        opDataExport.LabelNames.connect( opClassify.LabelNames )
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )

        # Expose for shell
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.pcApplet)
        self._applets.append(self.dataExportApplet)

        self._batch_input_args = None
        self._batch_export_args = None

        self.batchInputApplet = None
        self.batchResultsApplet = None
        if appendBatchOperators:
            # Create applets for batch workflow
            self.batchInputApplet = DataSelectionApplet(self, "Batch Prediction Input Selections", "Batch Inputs", supportIlastik05Import=False, batchDataGui=True)
            self.batchResultsApplet = PixelClassificationDataExportApplet(self, "Batch Prediction Output Locations", isBatch=True)
    
            # Expose in shell        
            self._applets.append(self.batchInputApplet)
            self._applets.append(self.batchResultsApplet)
    
            # Connect batch workflow (NOT lane-based)
            self._initBatchWorkflow()

            if unused_args:
                # We parse the export setting args first.  All remaining args are considered input files by the input applet.
                self._batch_export_args, unused_args = self.batchResultsApplet.parse_known_cmdline_args( unused_args )
                self._batch_input_args, unused_args = self.batchInputApplet.parse_known_cmdline_args( unused_args )
    
        if unused_args:
            logger.warn("Unused command-line args: {}".format( unused_args ))
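
The workflow above chains argparse.parse_known_args calls: each parser consumes only the flags it recognizes and hands the leftovers to the next one, and whatever is still unclaimed is treated as batch input files. A minimal standalone sketch of that pattern (plain argparse, not ilastik; the --output_format flag and file names are made-up stand-ins):

import argparse

cmdline = ['--filter', 'Interpolated', '--output_format=png', 'in1.h5', 'in2.h5']

workflow_parser = argparse.ArgumentParser()
workflow_parser.add_argument('--filter', choices=['Original', 'Refactored', 'Interpolated'], default='Original')
workflow_args, unused = workflow_parser.parse_known_args(cmdline)

export_parser = argparse.ArgumentParser()
export_parser.add_argument('--output_format')  # stand-in for the export applet's real options
export_args, unused = export_parser.parse_known_args(unused)

print(workflow_args.filter)       # Interpolated
print(export_args.output_format)  # png
print(unused)                     # ['in1.h5', 'in2.h5'] -- left over, treated as input files
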
Example #42
class ObjectClassificationWorkflow(Workflow):
    workflowName = "Object Classification Workflow Base"
    defaultAppletIndex = 1 # show DataSelection by default

    def __init__(self, shell, headless,
                 workflow_cmdline_args,
                 project_creation_args,
                 *args, **kwargs):
        graph = kwargs['graph'] if 'graph' in kwargs else Graph()
        if 'graph' in kwargs:
            del kwargs['graph']
        super(ObjectClassificationWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)

        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--fillmissing', help="use 'fill missing' applet with chosen detection method", choices=['classic', 'svm', 'none'], default='none')
        parser.add_argument('--filter', help="pixel feature filter implementation.", choices=['Original', 'Refactored', 'Interpolated'], default='Original')
        parser.add_argument('--nobatch', help="do not append batch applets", action='store_true', default=False)
        
        parsed_creation_args, unused_args = parser.parse_known_args(project_creation_args)

        self.fillMissing = parsed_creation_args.fillmissing
        self.filter_implementation = parsed_creation_args.filter

        parsed_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        if parsed_args.fillmissing != 'none' and parsed_creation_args.fillmissing != parsed_args.fillmissing:
            logger.error( "Ignoring --fillmissing cmdline arg.  Can't specify a different fillmissing setting after the project has already been created." )
        
        if parsed_args.filter != 'Original' and parsed_creation_args.filter != parsed_args.filter:
            logger.error( "Ignoring --filter cmdline arg.  Can't specify a different filter setting after the project has already been created." )

        self.batch = not parsed_args.nobatch

        self._applets = []

        self.projectMetadataApplet = ProjectMetadataApplet()
        self._applets.append(self.projectMetadataApplet)

        self.setupInputs()
        
        if self.fillMissing != 'none':
            self.fillMissingSlicesApplet = FillMissingSlicesApplet(
                self, "Fill Missing Slices", "Fill Missing Slices", self.fillMissing)
            self._applets.append(self.fillMissingSlicesApplet)

        if isinstance(self, ObjectClassificationWorkflowPixel):
            self.input_types = 'raw'
        elif isinstance(self, ObjectClassificationWorkflowBinary):
            self.input_types = 'raw+binary'
        elif isinstance( self, ObjectClassificationWorkflowPrediction ):
            self.input_types = 'raw+pmaps'
        
        # our main applets
        self.objectExtractionApplet = ObjectExtractionApplet(workflow=self, name = "Object Feature Selection")
        self.objectClassificationApplet = ObjectClassificationApplet(workflow=self)
        self.dataExportApplet = ObjectClassificationDataExportApplet(self, "Object Prediction Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( self.dataSelectionApplet.topLevelOperator.WorkingDirectory )
        
        # See EXPORT_SELECTION_PREDICTIONS and EXPORT_SELECTION_PROBABILITIES, above
        opDataExport.SelectionNames.setValue( ['Object Predictions', 'Object Probabilities'] )        
        if self.input_types == 'raw':
            # Re-configure to add the pixel probabilities option
            # See EXPORT_SELECTION_PIXEL_PROBABILITIES, above
            opDataExport.SelectionNames.setValue( ['Object Predictions', 'Object Probabilities', 'Pixel Probabilities'] )

        self._applets.append(self.objectExtractionApplet)
        self._applets.append(self.objectClassificationApplet)
        self._applets.append(self.dataExportApplet)

        # Headless batch args are filled in below, if provided on the command line.
        self._batch_input_args = None
        self._batch_export_args = None
        self._export_args = None

        if self.batch:
            self.dataSelectionAppletBatch = DataSelectionApplet(
                    self, "Batch Inputs", "Batch Inputs", batchDataGui=True)
            self.opDataSelectionBatch = self.dataSelectionAppletBatch.topLevelOperator
            
            if self.input_types == 'raw':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data'])
            elif self.input_types == 'raw+binary':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data', 'Binary Data'])
            elif self.input_types == 'raw+pmaps':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data', 'Prediction Maps'])
            else:
                assert False, "Unknown object classification subclass type."
    
            self.blockwiseObjectClassificationApplet = BlockwiseObjectClassificationApplet(
                self, "Blockwise Object Classification", "Blockwise Object Classification")
            self._applets.append(self.blockwiseObjectClassificationApplet)

            self.batchExportApplet = ObjectClassificationDataExportApplet(
                self, "Batch Object Prediction Export", isBatch=True)
        
            opBatchDataExport = self.batchExportApplet.topLevelOperator
            opBatchDataExport.WorkingDirectory.connect( self.dataSelectionApplet.topLevelOperator.WorkingDirectory )

            self._applets.append(self.dataSelectionAppletBatch)
            self._applets.append(self.batchExportApplet)

            self._initBatchWorkflow()

            if unused_args:
                # Additional export args (specific to the object classification workflow)
                export_arg_parser = argparse.ArgumentParser()
                export_arg_parser.add_argument( "--table_filename", help="The location to export the object feature/prediction CSV file.", required=False )
                export_arg_parser.add_argument( "--export_object_prediction_img", action="store_true" )
                export_arg_parser.add_argument( "--export_object_probability_img", action="store_true" )

                # TODO: Support this, too, someday?
                #export_arg_parser.add_argument( "--export_object_label_img", action="store_true" )
                
                if self.input_types == 'raw':
                    export_arg_parser.add_argument( "--export_pixel_probability_img", action="store_true" )
                self._export_args, unused_args = export_arg_parser.parse_known_args(unused_args)

                # We parse the export setting args first.  All remaining args are considered input files by the input applet.
                self._batch_export_args, unused_args = self.batchExportApplet.parse_known_cmdline_args( unused_args )
                self._batch_input_args, unused_args = self.dataSelectionAppletBatch.parse_known_cmdline_args( unused_args )

        if unused_args:
            warnings.warn("Unused command-line args: {}".format( unused_args ))

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def connectLane(self, laneIndex):
        rawslot, binaryslot = self.connectInputs(laneIndex)

        opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)

        opObjExtraction = self.objectExtractionApplet.topLevelOperator.getLane(laneIndex)
        opObjClassification = self.objectClassificationApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        opObjExtraction.RawImage.connect(rawslot)
        opObjExtraction.BinaryImage.connect(binaryslot)

        opObjClassification.RawImages.connect(rawslot)
        opObjClassification.LabelsAllowedFlags.connect(opData.AllowLabels)
        opObjClassification.BinaryImages.connect(binaryslot)

        opObjClassification.SegmentationImages.connect(opObjExtraction.LabelImage)
        opObjClassification.ObjectFeatures.connect(opObjExtraction.RegionFeatures)
        opObjClassification.ComputedFeatureNames.connect(opObjExtraction.ComputedFeatureNames)

        # Data Export connections
        opDataExport.RawData.connect( opData.ImageGroup[0] )
        opDataExport.RawDatasetInfo.connect( opData.DatasetGroup[0] )
        opDataExport.Inputs.resize(2)
        opDataExport.Inputs[EXPORT_SELECTION_PREDICTIONS].connect( opObjClassification.UncachedPredictionImages )
        opDataExport.Inputs[EXPORT_SELECTION_PROBABILITIES].connect( opObjClassification.ProbabilityChannelImage )
        if self.input_types == 'raw':
            # Append the prediction probabilities to the list of slots that can be exported.
            opDataExport.Inputs.resize(3)
            # Pull from this slot since the data has already been through the Op5 operator
            # (All data in the export operator must have matching spatial dimensions.)
            opThreshold = self.thresholdingApplet.topLevelOperator.getLane(laneIndex)
            opDataExport.Inputs[EXPORT_SELECTION_PIXEL_PROBABILITIES].connect( opThreshold.InputImage )

        if self.batch:
            opObjClassification = self.objectClassificationApplet.topLevelOperator.getLane(laneIndex)
            opBlockwiseObjectClassification = self.blockwiseObjectClassificationApplet.topLevelOperator.getLane(laneIndex)

            opBlockwiseObjectClassification.RawImage.connect(opObjClassification.RawImages)
            opBlockwiseObjectClassification.BinaryImage.connect(opObjClassification.BinaryImages)
            opBlockwiseObjectClassification.Classifier.connect(opObjClassification.Classifier)
            opBlockwiseObjectClassification.LabelsCount.connect(opObjClassification.NumLabels)
            opBlockwiseObjectClassification.SelectedFeatures.connect(opObjClassification.SelectedFeatures)

    def _initBatchWorkflow(self):
        # Access applet operators from the training workflow
        opObjectTrainingTopLevel = self.objectClassificationApplet.topLevelOperator
        opBlockwiseObjectClassification = self.blockwiseObjectClassificationApplet.topLevelOperator

        # If we are not in the binary workflow, connect the thresholding operator.
        # Parameter inputs are cloned from the interactive workflow,
        if isinstance(self, ObjectClassificationWorkflowBinary):
            #FIXME
            pass
        else:
            opInteractiveThreshold = self.thresholdingApplet.topLevelOperator
            opBatchThreshold = OperatorWrapper(OpThresholdTwoLevels, parent=self)
            opBatchThreshold.MinSize.connect(opInteractiveThreshold.MinSize)
            opBatchThreshold.MaxSize.connect(opInteractiveThreshold.MaxSize)
            opBatchThreshold.HighThreshold.connect(opInteractiveThreshold.HighThreshold)
            opBatchThreshold.LowThreshold.connect(opInteractiveThreshold.LowThreshold)
            opBatchThreshold.SingleThreshold.connect(opInteractiveThreshold.SingleThreshold)
            opBatchThreshold.SmootherSigma.connect(opInteractiveThreshold.SmootherSigma)
            opBatchThreshold.Channel.connect(opInteractiveThreshold.Channel)
            opBatchThreshold.CurOperator.connect(opInteractiveThreshold.CurOperator)

        # OpDataSelectionGroup.ImageGroup is indexed by [laneIndex][roleIndex],
        # but we need a slot that is indexed by [roleIndex][laneIndex]
        # so we can pass each role to the appropriate slots.
        # We use OpTransposeSlots to do this.
        opBatchInputByRole = OpTransposeSlots( parent=self )
        opBatchInputByRole.Inputs.connect( self.opDataSelectionBatch.ImageGroup )
        opBatchInputByRole.OutputLength.setValue(2)
        
        # Lane-indexed multislot for raw data
        batchInputsRaw = opBatchInputByRole.Outputs[0]
        # Lane-indexed multislot for binary/prediction-map data
        batchInputsOther = opBatchInputByRole.Outputs[1]

        # Connect the blockwise classification operator
        # Parameter inputs are cloned from the interactive workflow,
        opBatchClassify = OperatorWrapper(OpBlockwiseObjectClassification, parent=self,
                                          promotedSlotNames=['RawImage', 'BinaryImage'])
        opBatchClassify.Classifier.connect(opObjectTrainingTopLevel.Classifier)
        opBatchClassify.LabelsCount.connect(opObjectTrainingTopLevel.NumLabels)
        opBatchClassify.SelectedFeatures.connect(opObjectTrainingTopLevel.SelectedFeatures)
        opBatchClassify.BlockShape3dDict.connect(opBlockwiseObjectClassification.BlockShape3dDict)
        opBatchClassify.HaloPadding3dDict.connect(opBlockwiseObjectClassification.HaloPadding3dDict)

        #  but image pathway is from the batch pipeline
        op5Raw = OperatorWrapper(OpReorderAxes, parent=self)
        
        if self.fillMissing != 'none':
            opBatchFillMissingSlices = OperatorWrapper(OpFillMissingSlicesNoCache, parent=self)
            opBatchFillMissingSlices.Input.connect(batchInputsRaw)
            op5Raw.Input.connect(opBatchFillMissingSlices.Output)
        else:
            op5Raw.Input.connect(batchInputsRaw)
        
        
        op5Binary = OperatorWrapper(OpReorderAxes, parent=self)
        if self.input_types != 'raw+binary':
            op5Pred = OperatorWrapper(OpReorderAxes, parent=self)
            op5Pred.Input.connect(batchInputsOther)
            opBatchThreshold.RawInput.connect(op5Raw.Output)
            opBatchThreshold.InputImage.connect(op5Pred.Output)
            op5Binary.Input.connect(opBatchThreshold.Output)
        else:
            op5Binary.Input.connect(batchInputsOther)

        opBatchClassify.RawImage.connect(op5Raw.Output)
        opBatchClassify.BinaryImage.connect(op5Binary.Output)

        self.opBatchClassify = opBatchClassify

        # We need to transpose the dataset group, because it is indexed by [image_index][group_index]
        # But we want it to be indexed by [group_index][image_index] for the RawDatasetInfo connection, below.
        opTransposeDatasetGroup = OpTransposeSlots( parent=self )
        opTransposeDatasetGroup.OutputLength.setValue(1)
        opTransposeDatasetGroup.Inputs.connect( self.opDataSelectionBatch.DatasetGroup )

        # Connect the batch OUTPUT applet
        opBatchExport = self.batchExportApplet.topLevelOperator
        opBatchExport.RawData.connect( batchInputsRaw )
        opBatchExport.RawDatasetInfo.connect( opTransposeDatasetGroup.Outputs[0] )
        
        # See EXPORT_SELECTION_PREDICTIONS and EXPORT_SELECTION_PROBABILITIES, above
        opBatchExport.SelectionNames.setValue( ['Object Predictions', 'Object Probabilities'] )        
        # opBatchExport.Inputs is indexed by [lane][selection];
        # use OpTransposeSlots to allow the connection.
        opTransposeBatchInputs = OpTransposeSlots( parent=self )
        opTransposeBatchInputs.OutputLength.setValue(0)
        opTransposeBatchInputs.Inputs.resize(2)
        opTransposeBatchInputs.Inputs[EXPORT_SELECTION_PREDICTIONS].connect( opBatchClassify.PredictionImage ) # selection 0
        opTransposeBatchInputs.Inputs[EXPORT_SELECTION_PROBABILITIES].connect( opBatchClassify.ProbabilityChannelImage ) # selection 1
        
        # Now opTransposeBatchInputs.Outputs is level-2 indexed by [lane][selection]
        opBatchExport.Inputs.connect( opTransposeBatchInputs.Outputs )

    def onProjectLoaded(self, projectManager):
        if self._headless and self._batch_input_args and self._batch_export_args:
            
            # Check for problems: Is the project file ready to use?
            opObjClassification = self.objectClassificationApplet.topLevelOperator
            if not opObjClassification.Classifier.ready():
                logger.error( "Can't run batch prediction.\n"
                              "Couldn't obtain a classifier from your project file: {}.\n"
                              "Please make sure your project is fully configured with a trained classifier."
                              .format(projectManager.currentProjectPath) )
                return

            # Configure the batch data selection operator.
            if self._batch_input_args and self._batch_input_args.raw_data:
                self.dataSelectionAppletBatch.configure_operator_with_parsed_args( self._batch_input_args )
            
            # Configure the data export operator.
            if self._batch_export_args:
                self.batchExportApplet.configure_operator_with_parsed_args( self._batch_export_args )

            self.opBatchClassify.BlockShape3dDict.disconnect()

            # For each BATCH lane...
            for lane_index, opBatchClassifyView in enumerate(self.opBatchClassify):
                # Force the block size to be the same as image size (1 big block)
                tagged_shape = opBatchClassifyView.RawImage.meta.getTaggedShape()
                try:
                    tagged_shape.pop('t')
                except KeyError:
                    pass
                try:
                    tagged_shape.pop('c')
                except KeyError:
                    pass
                opBatchClassifyView.BlockShape3dDict.setValue( tagged_shape )

                # For now, we force the entire result to be computed as one big block.
                # Force the batch classify op to create an internal pipeline for our block.
                opBatchClassifyView._ensurePipelineExists( (0,0,0,0,0) )
                opSingleBlockClassify = opBatchClassifyView._blockPipelines[(0,0,0,0,0)]

                # Export the images (if any)
                if self.input_types == 'raw':
                    # If pixel probabilities need export, do that first.
                    # (They are needed by the other outputs, anyway)
                    if self._export_args.export_pixel_probability_img:
                        self._export_batch_image( lane_index, EXPORT_SELECTION_PIXEL_PROBABILITIES, 'pixel-probability-img' )
                if self._export_args.export_object_prediction_img:
                    self._export_batch_image( lane_index, EXPORT_SELECTION_PREDICTIONS, 'object-prediction-img' )
                if self._export_args.export_object_probability_img:
                    self._export_batch_image( lane_index, EXPORT_SELECTION_PROBABILITIES, 'object-probability-img' )

                # Export the CSV
                csv_filename = self._export_args.table_filename
                if csv_filename:
                    feature_table = opSingleBlockClassify._opPredict.createExportTable([])
                    if len(self.opBatchClassify) > 1:
                        base, ext = os.path.splitext( csv_filename )
                        csv_filename = base + '-' + str(lane_index) + ext
                    print "Exporting object table for image #{}:\n{}".format( lane_index, csv_filename )
                    self.record_array_to_csv(feature_table, csv_filename)
                
                print "FINISHED."

    def _export_batch_image(self, lane_index, selection_index, selection_name):
        opBatchExport = self.batchExportApplet.topLevelOperator
        opBatchExport.InputSelection.setValue(selection_index)
        opBatchExportView = opBatchExport.getLane(lane_index)

        # Remember this so we can restore it later
        default_output_path = opBatchExport.OutputFilenameFormat.value
        export_path = opBatchExportView.ExportPath.value

        path_comp = PathComponents( export_path, os.getcwd() )
        path_comp.filenameBase += '-' + selection_name
        opBatchExport.OutputFilenameFormat.setValue( path_comp.externalPath )
        
        logger.info( "Exporting {} for image #{} to {}"
                     .format(selection_name, lane_index+1, opBatchExportView.ExportPath.value) )

        sys.stdout.write( "Result {}/{} Progress: "
                          .format( lane_index+1, len( self.opBatchClassify ) ) )
        sys.stdout.flush()
        def print_progress( progress ):
            sys.stdout.write( "{} ".format( progress ) )
            sys.stdout.flush()

        # If the operator provides a progress signal, use it.
        slotProgressSignal = opBatchExportView.progressSignal
        slotProgressSignal.subscribe( print_progress )
        opBatchExportView.run_export()
        
        # Finished.
        sys.stdout.write("\n")
        
        # Restore original format
        opBatchExport.OutputFilenameFormat.setValue( default_output_path )

    def record_array_to_csv(self, record_array, filename):
        """
        Save the given record array to a CSV file.
        """
        # Sort by offset
        with open(filename, 'w') as csv_file:
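            # dtype.fields maps each field name to a (dtype, byte offset) tuple;
            # sorting on that offset recovers the record array's field order.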
            sorted_fields = sorted( record_array.dtype.fields.items(), key=lambda (k,v): v[1] )
            field_names = map( lambda (k,v): k, sorted_fields )
            for name in field_names:
                # Remove any commas in the header (this is csv, after all)
                name = name.replace(',', '/')
                csv_file.write(name + ',')
            csv_file.write('\n')
            for row in record_array:
                for name in field_names:
                    csv_file.write(str(row[name]) + ',')
                csv_file.write('\n')

    def getHeadlessOutputSlot(self, slotId):
        if slotId == "BatchPredictionImage":
            return self.opBatchClassify.PredictionImage
        raise Exception("Unknown headless output slot")

    def getSecondaryHeadlessOutputSlots(self, slotId):
        if slotId == "BatchPredictionImage":
            return [self.opBatchClassify.BlockwiseRegionFeatures]
        raise Exception("Unknown headless output slot")

    def handleAppletStateUpdateRequested(self, upstream_ready=False):
        """
        Overridden from Workflow base class.
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested` signal.

        This method will be called by the child classes with the result of their
        own applet readiness findings as a keyword argument.
        """

        # all workflows have these applets in common:

        # object feature selection
        # object classification
        # object prediction export
        # blockwise classification
        # batch input
        # batch prediction export

        cumulated_readyness = upstream_ready
        self._shell.setAppletEnabled(self.objectExtractionApplet, cumulated_readyness)

        if len(self.objectExtractionApplet.topLevelOperator.ComputedFeatureNames) == 0:
            object_features_ready = False
        else:
            object_features_ready = True
            for slot in self.objectExtractionApplet.topLevelOperator.ComputedFeatureNames:
                object_features_ready = object_features_ready and len(slot.value) > 0
        #object_features_ready = self.objectExtractionApplet.topLevelOperator.RegionFeatures.ready()

        cumulated_readyness = cumulated_readyness and object_features_ready
        self._shell.setAppletEnabled(self.objectClassificationApplet, cumulated_readyness)

        object_classification_ready = \
            self.objectClassificationApplet.predict_enabled

        cumulated_readyness = cumulated_readyness and object_classification_ready
        self._shell.setAppletEnabled(self.dataExportApplet, cumulated_readyness)

        if self.batch:
            object_prediction_ready = True  # TODO is that so?
            cumulated_readyness = cumulated_readyness and object_prediction_ready

            self._shell.setAppletEnabled(self.blockwiseObjectClassificationApplet, cumulated_readyness)
            self._shell.setAppletEnabled(self.dataSelectionAppletBatch, cumulated_readyness)
            self._shell.setAppletEnabled(self.batchExportApplet, cumulated_readyness)

        # Lastly, check for certain "busy" conditions, during which we 
        # should prevent the shell from closing the project.
        #TODO implement
        busy = False
        self._shell.enableProjectChanges( not busy )

    def _inputReady(self, nRoles):
        slot = self.dataSelectionApplet.topLevelOperator.ImageGroup
        if len(slot) > 0:
            input_ready = True
            for sub in slot:
                input_ready = input_ready and \
                    all([sub[i].ready() for i in range(nRoles)])
        else:
            input_ready = False

        return input_ready
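
OpTransposeSlots, as used in _initBatchWorkflow above, reindexes a level-2 multislot from [laneIndex][roleIndex] to [roleIndex][laneIndex]. A hedged plain-Python sketch of that reindexing with nested lists (no lazyflow; the slot values are stand-in strings):

# ImageGroup-style nesting: one entry per lane, one sub-entry per role.
image_group = [['raw_lane0', 'binary_lane0'],
               ['raw_lane1', 'binary_lane1']]

by_role = list(zip(*image_group))  # now indexed [role][lane]
batch_inputs_raw, batch_inputs_other = by_role
print(batch_inputs_raw)    # ('raw_lane0', 'raw_lane1')
print(batch_inputs_other)  # ('binary_lane0', 'binary_lane1')
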
Example #43
class ObjectClassificationWorkflow(Workflow):
    workflowName = "Object Classification Workflow Base"
    defaultAppletIndex = 1 # show DataSelection by default

    def __init__(self, shell, headless,
                 workflow_cmdline_args,
                 project_creation_args,
                 *args, **kwargs):
        graph = kwargs['graph'] if 'graph' in kwargs else Graph()
        if 'graph' in kwargs:
            del kwargs['graph']
        super(ObjectClassificationWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)

        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--fillmissing', help="use 'fill missing' applet with chosen detection method", choices=['classic', 'svm', 'none'], default='none')
        parser.add_argument('--filter', help="pixel feature filter implementation.", choices=['Original', 'Refactored', 'Interpolated'], default='Original')
        parser.add_argument('--nobatch', help="do not append batch applets", action='store_true', default=False)
        
        parsed_creation_args, unused_args = parser.parse_known_args(project_creation_args)

        self.fillMissing = parsed_creation_args.fillmissing
        self.filter_implementation = parsed_creation_args.filter

        parsed_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        if parsed_args.fillmissing != 'none' and parsed_creation_args.fillmissing != parsed_args.fillmissing:
            logger.error( "Ignoring --fillmissing cmdline arg.  Can't specify a different fillmissing setting after the project has already been created." )
        
        if parsed_args.filter != 'Original' and parsed_creation_args.filter != parsed_args.filter:
            logger.error( "Ignoring --filter cmdline arg.  Can't specify a different filter setting after the project has already been created." )

        self.batch = not parsed_args.nobatch

        self._applets = []

        self.projectMetadataApplet = ProjectMetadataApplet()
        self._applets.append(self.projectMetadataApplet)

        self.setupInputs()
        
        if self.fillMissing != 'none':
            self.fillMissingSlicesApplet = FillMissingSlicesApplet(
                self, "Fill Missing Slices", "Fill Missing Slices", self.fillMissing)
            self._applets.append(self.fillMissingSlicesApplet)

        if isinstance(self, ObjectClassificationWorkflowPixel):
            self.input_types = 'raw'
        elif isinstance(self, ObjectClassificationWorkflowBinary):
            self.input_types = 'raw+binary'
        elif isinstance( self, ObjectClassificationWorkflowPrediction ):
            self.input_types = 'raw+pmaps'
        
        # our main applets
        self.objectExtractionApplet = ObjectExtractionApplet(workflow=self, name = "Object Feature Selection")
        self.objectClassificationApplet = ObjectClassificationApplet(workflow=self)
        self.dataExportApplet = ObjectClassificationDataExportApplet(self, "Object Information Export")
        self.dataExportApplet.set_exporting_operator(self.objectClassificationApplet.topLevelOperator)
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( self.dataSelectionApplet.topLevelOperator.WorkingDirectory )
        
        # See EXPORT_SELECTION_PREDICTIONS and EXPORT_SELECTION_PROBABILITIES, above
        opDataExport.SelectionNames.setValue( ['Object Predictions', 'Object Probabilities'] )        
        if self.input_types == 'raw':
            # Re-configure to add the pixel probabilities option
            # See EXPORT_SELECTION_PIXEL_PROBABILITIES, above
            opDataExport.SelectionNames.setValue( ['Object Predictions', 'Object Probabilities', 'Pixel Probabilities'] )

        self._applets.append(self.objectExtractionApplet)
        self._applets.append(self.objectClassificationApplet)
        self._applets.append(self.dataExportApplet)

        # Headless batch args are filled in below, if provided on the command line.
        self._batch_input_args = None
        self._batch_export_args = None
        self._export_args = None

        if self.batch:
            self.dataSelectionAppletBatch = DataSelectionApplet(
                    self, "Batch Inputs", "Batch Inputs", batchDataGui=True)
            self.opDataSelectionBatch = self.dataSelectionAppletBatch.topLevelOperator
            
            if self.input_types == 'raw':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data'])
            elif self.input_types == 'raw+binary':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data', 'Binary Data'])
            elif self.input_types == 'raw+pmaps':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data', 'Prediction Maps'])
            else:
                assert False, "Unknown object classification subclass type."
    
            self.blockwiseObjectClassificationApplet = BlockwiseObjectClassificationApplet(
                self, "Blockwise Object Classification", "Blockwise Object Classification")
            self._applets.append(self.blockwiseObjectClassificationApplet)

            self.batchExportApplet = ObjectClassificationDataExportApplet(
                self, "Batch Object Prediction Export", isBatch=True)
        
            opBatchDataExport = self.batchExportApplet.topLevelOperator
            opBatchDataExport.WorkingDirectory.connect( self.dataSelectionApplet.topLevelOperator.WorkingDirectory )

            self._applets.append(self.dataSelectionAppletBatch)
            self._applets.append(self.batchExportApplet)

            self._initBatchWorkflow()

            if unused_args:
                # Additional export args (specific to the object classification workflow)
                export_arg_parser = argparse.ArgumentParser()
                export_arg_parser.add_argument( "--table_filename", help="The location to export the object feature/prediction CSV file.", required=False )
                export_arg_parser.add_argument( "--export_object_prediction_img", action="store_true" )
                export_arg_parser.add_argument( "--export_object_probability_img", action="store_true" )

                # TODO: Support this, too, someday?
                #export_arg_parser.add_argument( "--export_object_label_img", action="store_true" )
                
                if self.input_types == 'raw':
                    export_arg_parser.add_argument( "--export_pixel_probability_img", action="store_true" )
                self._export_args, unused_args = export_arg_parser.parse_known_args(unused_args)

                # We parse the export setting args first.  All remaining args are considered input files by the input applet.
                self._batch_export_args, unused_args = self.batchExportApplet.parse_known_cmdline_args( unused_args )
                self._batch_input_args, unused_args = self.dataSelectionAppletBatch.parse_known_cmdline_args( unused_args )

        if unused_args:
            warnings.warn("Unused command-line args: {}".format( unused_args ))

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def connectLane(self, laneIndex):
        rawslot, binaryslot = self.connectInputs(laneIndex)

        opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)

        opObjExtraction = self.objectExtractionApplet.topLevelOperator.getLane(laneIndex)
        opObjClassification = self.objectClassificationApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        opObjExtraction.RawImage.connect(rawslot)
        opObjExtraction.BinaryImage.connect(binaryslot)

        opObjClassification.RawImages.connect(rawslot)
        opObjClassification.LabelsAllowedFlags.connect(opData.AllowLabels)
        opObjClassification.BinaryImages.connect(binaryslot)

        opObjClassification.SegmentationImages.connect(opObjExtraction.LabelImage)
        opObjClassification.ObjectFeatures.connect(opObjExtraction.RegionFeatures)
        opObjClassification.ComputedFeatureNames.connect(opObjExtraction.ComputedFeatureNames)

        # Data Export connections
        opDataExport.RawData.connect( opData.ImageGroup[0] )
        opDataExport.RawDatasetInfo.connect( opData.DatasetGroup[0] )
        opDataExport.Inputs.resize(2)
        opDataExport.Inputs[EXPORT_SELECTION_PREDICTIONS].connect( opObjClassification.UncachedPredictionImages )
        opDataExport.Inputs[EXPORT_SELECTION_PROBABILITIES].connect( opObjClassification.ProbabilityChannelImage )
        if self.input_types == 'raw':
            # Append the prediction probabilities to the list of slots that can be exported.
            opDataExport.Inputs.resize(3)
            # Pull from this slot since the data has already been through the Op5 operator
            # (All data in the export operator must have matching spatial dimensions.)
            opThreshold = self.thresholdingApplet.topLevelOperator.getLane(laneIndex)
            opDataExport.Inputs[EXPORT_SELECTION_PIXEL_PROBABILITIES].connect( opThreshold.InputImage )

        if self.batch:
            opObjClassification = self.objectClassificationApplet.topLevelOperator.getLane(laneIndex)
            opBlockwiseObjectClassification = self.blockwiseObjectClassificationApplet.topLevelOperator.getLane(laneIndex)

            opBlockwiseObjectClassification.RawImage.connect(opObjClassification.RawImages)
            opBlockwiseObjectClassification.BinaryImage.connect(opObjClassification.BinaryImages)
            opBlockwiseObjectClassification.Classifier.connect(opObjClassification.Classifier)
            opBlockwiseObjectClassification.LabelsCount.connect(opObjClassification.NumLabels)
            opBlockwiseObjectClassification.SelectedFeatures.connect(opObjClassification.SelectedFeatures)

    def _initBatchWorkflow(self):
        # Access applet operators from the training workflow
        opObjectTrainingTopLevel = self.objectClassificationApplet.topLevelOperator
        opBlockwiseObjectClassification = self.blockwiseObjectClassificationApplet.topLevelOperator

        # If we are not in the binary workflow, connect the thresholding operator.
        # Parameter inputs are cloned from the interactive workflow,
        if isinstance(self, ObjectClassificationWorkflowBinary):
            #FIXME
            pass
        else:
            opInteractiveThreshold = self.thresholdingApplet.topLevelOperator
            opBatchThreshold = OperatorWrapper(OpThresholdTwoLevels, parent=self)
            opBatchThreshold.MinSize.connect(opInteractiveThreshold.MinSize)
            opBatchThreshold.MaxSize.connect(opInteractiveThreshold.MaxSize)
            opBatchThreshold.HighThreshold.connect(opInteractiveThreshold.HighThreshold)
            opBatchThreshold.LowThreshold.connect(opInteractiveThreshold.LowThreshold)
            opBatchThreshold.SingleThreshold.connect(opInteractiveThreshold.SingleThreshold)
            opBatchThreshold.SmootherSigma.connect(opInteractiveThreshold.SmootherSigma)
            opBatchThreshold.Channel.connect(opInteractiveThreshold.Channel)
            opBatchThreshold.CurOperator.connect(opInteractiveThreshold.CurOperator)

        # OpDataSelectionGroup.ImageGroup is indexed by [laneIndex][roleIndex],
        # but we need a slot that is indexed by [roleIndex][laneIndex]
        # so we can pass each role to the appropriate slots.
        # We use OpTransposeSlots to do this.
        opBatchInputByRole = OpTransposeSlots( parent=self )
        opBatchInputByRole.Inputs.connect( self.opDataSelectionBatch.ImageGroup )
        opBatchInputByRole.OutputLength.setValue(2)
        
        # Lane-indexed multislot for raw data
        batchInputsRaw = opBatchInputByRole.Outputs[0]
        # Lane-indexed multislot for binary/prediction-map data
        batchInputsOther = opBatchInputByRole.Outputs[1]

        # Connect the blockwise classification operator
        # Parameter inputs are cloned from the interactive workflow,
        opBatchClassify = OperatorWrapper(OpBlockwiseObjectClassification, parent=self,
                                          promotedSlotNames=['RawImage', 'BinaryImage'])
        opBatchClassify.Classifier.connect(opObjectTrainingTopLevel.Classifier)
        opBatchClassify.LabelsCount.connect(opObjectTrainingTopLevel.NumLabels)
        opBatchClassify.SelectedFeatures.connect(opObjectTrainingTopLevel.SelectedFeatures)
        opBatchClassify.BlockShape3dDict.connect(opBlockwiseObjectClassification.BlockShape3dDict)
        opBatchClassify.HaloPadding3dDict.connect(opBlockwiseObjectClassification.HaloPadding3dDict)

        #  but image pathway is from the batch pipeline
        op5Raw = OperatorWrapper(OpReorderAxes, parent=self)
        
        if self.fillMissing != 'none':
            opBatchFillMissingSlices = OperatorWrapper(OpFillMissingSlicesNoCache, parent=self)
            opBatchFillMissingSlices.Input.connect(batchInputsRaw)
            op5Raw.Input.connect(opBatchFillMissingSlices.Output)
        else:
            op5Raw.Input.connect(batchInputsRaw)
        
        
        op5Binary = OperatorWrapper(OpReorderAxes, parent=self)
        if self.input_types != 'raw+binary':
            op5Pred = OperatorWrapper(OpReorderAxes, parent=self)
            op5Pred.Input.connect(batchInputsOther)
            opBatchThreshold.RawInput.connect(op5Raw.Output)
            opBatchThreshold.InputImage.connect(op5Pred.Output)
            op5Binary.Input.connect(opBatchThreshold.Output)
        else:
            op5Binary.Input.connect(batchInputsOther)

        opBatchClassify.RawImage.connect(op5Raw.Output)
        opBatchClassify.BinaryImage.connect(op5Binary.Output)

        self.opBatchClassify = opBatchClassify

        # We need to transpose the dataset group, because it is indexed by [image_index][group_index]
        # But we want it to be indexed by [group_index][image_index] for the RawDatasetInfo connection, below.
        opTransposeDatasetGroup = OpTransposeSlots( parent=self )
        opTransposeDatasetGroup.OutputLength.setValue(1)
        opTransposeDatasetGroup.Inputs.connect( self.opDataSelectionBatch.DatasetGroup )

        # Connect the batch OUTPUT applet
        opBatchExport = self.batchExportApplet.topLevelOperator
        opBatchExport.RawData.connect( batchInputsRaw )
        opBatchExport.RawDatasetInfo.connect( opTransposeDatasetGroup.Outputs[0] )
        
        # See EXPORT_SELECTION_PREDICTIONS and EXPORT_SELECTION_PROBABILITIES, above
        opBatchExport.SelectionNames.setValue( ['Object Predictions', 'Object Probabilities'] )        
        # opBatchExport.Inputs is indexed by [lane][selection];
        # use OpTransposeSlots to allow the connection.
        opTransposeBatchInputs = OpTransposeSlots( parent=self )
        opTransposeBatchInputs.OutputLength.setValue(0)
        opTransposeBatchInputs.Inputs.resize(2)
        opTransposeBatchInputs.Inputs[EXPORT_SELECTION_PREDICTIONS].connect( opBatchClassify.PredictionImage ) # selection 0
        opTransposeBatchInputs.Inputs[EXPORT_SELECTION_PROBABILITIES].connect( opBatchClassify.ProbabilityChannelImage ) # selection 1
        
        # Now opTransposeBatchInputs.Outputs is level-2 indexed by [lane][selection]
        opBatchExport.Inputs.connect( opTransposeBatchInputs.Outputs )

    def onProjectLoaded(self, projectManager):
        if self._headless and self._batch_input_args and self._batch_export_args:
            
            # Check for problems: Is the project file ready to use?
            opObjClassification = self.objectClassificationApplet.topLevelOperator
            if not opObjClassification.Classifier.ready():
                logger.error( "Can't run batch prediction.\n"
                              "Couldn't obtain a classifier from your project file: {}.\n"
                              "Please make sure your project is fully configured with a trained classifier."
                              .format(projectManager.currentProjectPath) )
                return

            # Configure the batch data selection operator.
            if self._batch_input_args and self._batch_input_args.raw_data:
                self.dataSelectionAppletBatch.configure_operator_with_parsed_args( self._batch_input_args )
            
            # Configure the data export operator.
            if self._batch_export_args:
                self.batchExportApplet.configure_operator_with_parsed_args( self._batch_export_args )

            self.opBatchClassify.BlockShape3dDict.disconnect()

            # For each BATCH lane...
            for lane_index, opBatchClassifyView in enumerate(self.opBatchClassify):
                # Force the block size to be the same as image size (1 big block)
                tagged_shape = opBatchClassifyView.RawImage.meta.getTaggedShape()
                try:
                    tagged_shape.pop('t')
                except KeyError:
                    pass
                try:
                    tagged_shape.pop('c')
                except KeyError:
                    pass
                opBatchClassifyView.BlockShape3dDict.setValue( tagged_shape )

                # For now, we force the entire result to be computed as one big block.
                # Force the batch classify op to create an internal pipeline for our block.
                opBatchClassifyView._ensurePipelineExists( (0,0,0,0,0) )
                opSingleBlockClassify = opBatchClassifyView._blockPipelines[(0,0,0,0,0)]

                # Export the images (if any)
                if self.input_types == 'raw':
                    # If pixel probabilities need export, do that first.
                    # (They are needed by the other outputs, anyway)
                    if self._export_args.export_pixel_probability_img:
                        self._export_batch_image( lane_index, EXPORT_SELECTION_PIXEL_PROBABILITIES, 'pixel-probability-img' )
                if self._export_args.export_object_prediction_img:
                    self._export_batch_image( lane_index, EXPORT_SELECTION_PREDICTIONS, 'object-prediction-img' )
                if self._export_args.export_object_probability_img:
                    self._export_batch_image( lane_index, EXPORT_SELECTION_PROBABILITIES, 'object-probability-img' )

                # Export the CSV
                csv_filename = self._export_args.table_filename
                if csv_filename:
                    feature_table = opSingleBlockClassify._opPredict.createExportTable([])
                    if len(self.opBatchClassify) > 1:
                        base, ext = os.path.splitext( csv_filename )
                        csv_filename = base + '-' + str(lane_index) + ext
                    print "Exporting object table for image #{}:\n{}".format( lane_index, csv_filename )
                    self.record_array_to_csv(feature_table, csv_filename)
                
                print "FINISHED."

    def _export_batch_image(self, lane_index, selection_index, selection_name):
        opBatchExport = self.batchExportApplet.topLevelOperator
        opBatchExport.InputSelection.setValue(selection_index)
        opBatchExportView = opBatchExport.getLane(lane_index)

        # Remember this so we can restore it later
        default_output_path = opBatchExport.OutputFilenameFormat.value
        export_path = opBatchExportView.ExportPath.value

        path_comp = PathComponents( export_path, os.getcwd() )
        path_comp.filenameBase += '-' + selection_name
        opBatchExport.OutputFilenameFormat.setValue( path_comp.externalPath )
        
        logger.info( "Exporting {} for image #{} to {}"
                     .format(selection_name, lane_index+1, opBatchExportView.ExportPath.value) )

        sys.stdout.write( "Result {}/{} Progress: "
                          .format( lane_index+1, len( self.opBatchClassify ) ) )
        sys.stdout.flush()
        def print_progress( progress ):
            sys.stdout.write( "{} ".format( progress ) )
            sys.stdout.flush()

        # If the operator provides a progress signal, use it.
        slotProgressSignal = opBatchExportView.progressSignal
        slotProgressSignal.subscribe( print_progress )
        opBatchExportView.run_export()
        
        # Finished.
        sys.stdout.write("\n")
        
        # Restore original format
        opBatchExport.OutputFilenameFormat.setValue( default_output_path )

    def record_array_to_csv(self, record_array, filename):
        """
        Save the given record array to a CSV file.
        """
        # Sort by offset
        with open(filename, 'w') as csv_file:
            sorted_fields = sorted( record_array.dtype.fields.items(), key=lambda (k,v): v[1] )
            field_names = map( lambda (k,v): k, sorted_fields )
            for name in field_names:
                # Remove any commas in the header (this is csv, after all)
                name = name.replace(',', '/')
                csv_file.write(name + ',')
            csv_file.write('\n')
            for row in record_array:
                for name in field_names:
                    csv_file.write(str(row[name]) + ',')
                csv_file.write('\n')

    def getHeadlessOutputSlot(self, slotId):
        if slotId == "BatchPredictionImage":
            return self.opBatchClassify.PredictionImage
        raise Exception("Unknown headless output slot")

    def postprocessClusterSubResult(self, roi, result, blockwise_fileset):
        """
        """
        # TODO: Here, we hard-code to select from the first lane only.
        opBatchClassify = self.opBatchClassify[0]
        
        from lazyflow.utility.io.blockwiseFileset import vectorized_pickle_dumps
        # Assume that roi always starts as a multiple of the blockshape
        block_shape = opBatchClassify.get_blockshape()
        assert all(block_shape == blockwise_fileset.description.sub_block_shape), "block shapes don't match"
        assert all((roi[0] % block_shape) == 0), "Sub-blocks must exactly correspond to the blockwise object classification blockshape"
        sub_block_index = roi[0] / blockwise_fileset.description.sub_block_shape

        sub_block_start = sub_block_index
        sub_block_stop = sub_block_start + 1
        sub_block_roi = (sub_block_start, sub_block_stop)
        
        # FIRST, remove all objects that lie outside the block (i.e. remove the ones in the halo)
        region_features = opBatchClassify.BlockwiseRegionFeatures( *sub_block_roi ).wait()
        region_features_dict = region_features.flat[0]
        region_centers = region_features_dict['Default features']['RegionCenter']

        opBlockPipeline = opBatchClassify._blockPipelines[ tuple(roi[0]) ]

        # Compute the block offset within the image coordinates
        halo_roi = opBlockPipeline._halo_roi

        translated_region_centers = region_centers + halo_roi[0][1:-1]

        # TODO: If this is too slow, vectorize this
        mask = numpy.zeros( region_centers.shape[0], dtype=numpy.bool_ )
        for index, translated_region_center in enumerate(translated_region_centers):
            # FIXME: Here we assume t=0 and c=0
            mask[index] = opBatchClassify.is_in_block( roi[0], (0,) + tuple(translated_region_center) + (0,) )
        
        # Always exclude the first object (it's the background??)
        mask[0] = False
        
        # Remove all 'negative' predictions, emit only 'positive' predictions
        # FIXME: Don't hardcode this?
        POSITIVE_LABEL = 2
        objectwise_predictions = opBlockPipeline.ObjectwisePredictions([]).wait()[0]
        assert objectwise_predictions.shape == mask.shape
        mask[objectwise_predictions != POSITIVE_LABEL] = False

        filtered_features = {}
        for feature_group, feature_dict in region_features_dict.items():
            filtered_group = filtered_features[feature_group] = {}
            for feature_name, feature_array in feature_dict.items():
                filtered_group[feature_name] = feature_array[mask]

        # SECOND, translate from block-local coordinates to global (file) coordinates.
        # Unfortunately, we've got multiple translations to perform here:
        # Coordinates in the region features are relative to their own block INCLUDING HALO,
        #  so we need to add the start of the block-with-halo as an offset.
        # BUT the image itself may be offset relative to the BlockwiseFileset coordinates
        #  (due to the view_origin setting), so we also need to add an offset for that, too

        # Get the image offset relative to the file coordinates
        image_offset = blockwise_fileset.description.view_origin
        
        total_offset_5d = halo_roi[0] + image_offset
        total_offset_3d = total_offset_5d[1:-1]

        filtered_features["Default features"]["RegionCenter"] += total_offset_3d
        filtered_features["Default features"]["Coord<Minimum>"] += total_offset_3d
        filtered_features["Default features"]["Coord<Maximum>"] += total_offset_3d

        # Finally, write the features to hdf5
        h5File = blockwise_fileset.getOpenHdf5FileForBlock( roi[0] )
        if 'pickled_region_features' in h5File:
            del h5File['pickled_region_features']

        # Must use str dtype
        dtype = h5py.new_vlen(str)
        dataset = h5File.create_dataset( 'pickled_region_features', shape=(1,), dtype=dtype )
        pickled_features = vectorized_pickle_dumps(numpy.array((filtered_features,)))
        dataset[0] = pickled_features

        object_centers_xyz = filtered_features["Default features"]["RegionCenter"].astype(int)
        object_min_coords_xyz = filtered_features["Default features"]["Coord<Minimum>"].astype(int)
        object_max_coords_xyz = filtered_features["Default features"]["Coord<Maximum>"].astype(int)
        object_sizes = filtered_features["Default features"]["Count"][:,0].astype(int)

        # Also, write out selected features as a 'point cloud' csv file.
        # (Store the csv file next to this block's h5 file.)
        dataset_directory = blockwise_fileset.getDatasetDirectory(roi[0])
        pointcloud_path = os.path.join( dataset_directory, "block-pointcloud.csv" )
        
        logger.info("Writing to csv: {}".format( pointcloud_path ))
        with open(pointcloud_path, "w") as fout:
            csv_writer = csv.DictWriter(fout, OUTPUT_COLUMNS, **CSV_FORMAT)
            csv_writer.writeheader()
        
            for obj_id in range(len(object_sizes)):
                fields = {}
                fields["x_px"], fields["y_px"], fields["z_px"], = object_centers_xyz[obj_id]
                fields["min_x_px"], fields["min_y_px"], fields["min_z_px"], = object_min_coords_xyz[obj_id]
                fields["max_x_px"], fields["max_y_px"], fields["max_z_px"], = object_max_coords_xyz[obj_id]
                fields["size_px"] = object_sizes[obj_id]

                csv_writer.writerow( fields )
                #fout.flush()
        
        logger.info("FINISHED csv export")

    def handleAppletStateUpdateRequested(self, upstream_ready=False):
        """
        Overridden from Workflow base class.
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested` signal.

        This method will be called by the child classes with the result of their
        own applet readiness findings as a keyword argument.
        """

        # all workflows have these applets in common:

        # object feature selection
        # object classification
        # object prediction export
        # blockwise classification
        # batch input
        # batch prediction export

        cumulated_readyness = upstream_ready
        self._shell.setAppletEnabled(self.objectExtractionApplet, cumulated_readyness)

        if len(self.objectExtractionApplet.topLevelOperator.ComputedFeatureNames) == 0:
            object_features_ready = False
        else:
            object_features_ready = True
            for slot in self.objectExtractionApplet.topLevelOperator.ComputedFeatureNames:
                object_features_ready = object_features_ready and len(slot.value) > 0
        #object_features_ready = self.objectExtractionApplet.topLevelOperator.RegionFeatures.ready()

        cumulated_readyness = cumulated_readyness and object_features_ready
        self._shell.setAppletEnabled(self.objectClassificationApplet, cumulated_readyness)

        object_classification_ready = \
            self.objectClassificationApplet.predict_enabled

        cumulated_readyness = cumulated_readyness and object_classification_ready
        self._shell.setAppletEnabled(self.dataExportApplet, cumulated_readyness)

        if self.batch:
            object_prediction_ready = True  # TODO is that so?
            cumulated_readyness = cumulated_readyness and object_prediction_ready

            self._shell.setAppletEnabled(self.blockwiseObjectClassificationApplet, cumulated_readyness)
            self._shell.setAppletEnabled(self.dataSelectionAppletBatch, cumulated_readyness)
            self._shell.setAppletEnabled(self.batchExportApplet, cumulated_readyness)

        # Lastly, check for certain "busy" conditions, during which we 
        # should prevent the shell from closing the project.
        #TODO implement
        busy = False
        self._shell.enableProjectChanges( not busy )

    def _inputReady(self, nRoles):
        slot = self.dataSelectionApplet.topLevelOperator.ImageGroup
        if len(slot) > 0:
            input_ready = True
            for sub in slot:
                input_ready = input_ready and \
                    all([sub[i].ready() for i in range(nRoles)])
        else:
            input_ready = False

        return input_ready
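
postprocessClusterSubResult above applies two offsets before exporting region features: centers are measured relative to their block including the halo, and the whole image may additionally be shifted by the fileset's view_origin. A hedged numeric sketch of that arithmetic (made-up values, spatial axes only):

import numpy

region_center = numpy.array([4, 5, 6])        # block-local (x, y, z), halo included
halo_start_3d = numpy.array([100, 200, 300])  # start of the block-with-halo
view_origin_3d = numpy.array([10, 0, 0])      # image offset within the fileset

global_center = region_center + halo_start_3d + view_origin_3d
print(global_center)  # [114 205 306]
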
Example #44
class DataConversionWorkflow(Workflow):
    """
    Simple workflow for converting data between formats.  Has only two applets: Data Selection and Data Export.
    
    Also supports a command-line interface for headless mode.
    
    For example:
    
    .. code-block:: bash

        python ilastik.py --headless --new_project=NewTemporaryProject.ilp --workflow=DataConversionWorkflow --output_format="png sequence" ~/input1.h5 ~/input2.h5
    
    Or if you have an existing project with input files already selected and configured:

    .. code-block:: bash

        python ilastik.py --headless --project=MyProject.ilp --output_format=jpeg
    
    .. note:: Beware of issues related to absolute vs. relative paths.  Relative links are stored relative to the project file.
              To avoid this issue entirely, either 
                 (1) use only absolute filepaths
              or (2) cd into your project file's directory before launching ilastik.
    
    """
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self._applets = []

        # Create applets 
        self.dataSelectionApplet = DataSelectionApplet(self, 
                                                       "Input Data", 
                                                       "Input Data", 
                                                       supportIlastik05Import=True, 
                                                       batchDataGui=False,
                                                       force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        role_names = ["Input Data"]
        opDataSelection.DatasetRoles.setValue( role_names )

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( ["Input"] )        

        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.dataExportApplet )

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        self._workflow_cmdline_args = workflow_cmdline_args
        self._data_input_args = None
        self._data_export_args = None
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._data_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, role_names )
            if unused_args:
                logger.warning("Unused command-line args: {}".format( unused_args ))

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the batch data selection operator.
        if self._data_input_args and self._data_input_args.input_files:
            self.dataSelectionApplet.configure_operator_with_parsed_args( self._data_input_args )
        
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args( self._data_export_args )

        if self._headless and self._data_input_args and self._data_export_args:
            # Now run the export and report progress....
            opDataExport = self.dataExportApplet.topLevelOperator
            for i, opExportDataLaneView in enumerate(opDataExport):
                logger.info( "Exporting file #{} to {}".format(i, opExportDataLaneView.ExportPath.value) )
    
                sys.stdout.write( "Result #{}/{} Progress: ".format( i, len( opDataExport ) ) )
                def print_progress( progress ):
                    sys.stdout.write( "{} ".format( progress ) )
    
                # If the operator provides a progress signal, use it.
                slotProgressSignal = opExportDataLaneView.progressSignal
                slotProgressSignal.subscribe( print_progress )
                opExportDataLaneView.run_export()
                
                # Finished.
                sys.stdout.write("\n")

    def connectLane(self, laneIndex):
        opDataSelectionView = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opDataExportView = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        opDataExportView.RawDatasetInfo.connect( opDataSelectionView.DatasetGroup[0] )        
        opDataExportView.Inputs.resize( 1 )
        opDataExportView.Inputs[0].connect( opDataSelectionView.ImageGroup[0] )

        # There is no special "raw" display layer in this workflow.
        #opDataExportView.RawData.connect( opDataSelectionView.ImageGroup[0] )

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.statusUpdateSignal`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0

        opDataExport = self.dataExportApplet.topLevelOperator
        export_data_ready = input_ready and \
                            len(opDataExport.Inputs[0]) > 0 and \
                            opDataExport.Inputs[0][0].ready() and \
                            (TinyVector(opDataExport.Inputs[0][0].meta.shape) > 0).all()

        self._shell.setAppletEnabled(self.dataExportApplet, export_data_ready)
        
        # Lastly, check for certain "busy" conditions, during which we 
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        self._shell.enableProjectChanges( not busy )
    def __init__(self, shell, headless,
                 workflow_cmdline_args,
                 project_creation_args,
                 *args, **kwargs):
        # Use the caller-supplied graph if one was given; otherwise create our own.
        graph = kwargs.pop('graph', Graph())
        super(ObjectClassificationWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)

        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--fillmissing', help="use 'fill missing' applet with chosen detection method", choices=['classic', 'svm', 'none'], default='none')
        parser.add_argument('--filter', help="pixel feature filter implementation.", choices=['Original', 'Refactored', 'Interpolated'], default='Original')
        parser.add_argument('--nobatch', help="do not append batch applets", action='store_true', default=False)
        
        parsed_creation_args, unused_args = parser.parse_known_args(project_creation_args)

        self.fillMissing = parsed_creation_args.fillmissing
        self.filter_implementation = parsed_creation_args.filter

        parsed_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        if parsed_args.fillmissing != 'none' and parsed_creation_args.fillmissing != parsed_args.fillmissing:
            logger.error( "Ignoring --fillmissing cmdline arg.  Can't specify a different fillmissing setting after the project has already been created." )
        
        if parsed_args.filter != 'Original' and parsed_creation_args.filter != parsed_args.filter:
            logger.error( "Ignoring --filter cmdline arg.  Can't specify a different filter setting after the project has already been created." )

        self.batch = not parsed_args.nobatch
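
        # For illustration only (hypothetical project name and values): the
        # creation-time options parsed above would be supplied when the
        # project is first created, e.g.
        #   python ilastik.py --new_project=MyObjects.ilp \
        #       --workflow=ObjectClassificationWorkflowPixel \
        #       --fillmissing=svm --filter=Interpolated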

        self._applets = []

        self.projectMetadataApplet = ProjectMetadataApplet()
        self._applets.append(self.projectMetadataApplet)

        self.setupInputs()
        
        if self.fillMissing != 'none':
            self.fillMissingSlicesApplet = FillMissingSlicesApplet(
                self, "Fill Missing Slices", "Fill Missing Slices", self.fillMissing)
            self._applets.append(self.fillMissingSlicesApplet)

        if isinstance(self, ObjectClassificationWorkflowPixel):
            self.input_types = 'raw'
        elif isinstance(self, ObjectClassificationWorkflowBinary):
            self.input_types = 'raw+binary'
        elif isinstance( self, ObjectClassificationWorkflowPrediction ):
            self.input_types = 'raw+pmaps'
        
        # our main applets
        self.objectExtractionApplet = ObjectExtractionApplet(workflow=self, name = "Object Feature Selection")
        self.objectClassificationApplet = ObjectClassificationApplet(workflow=self)
        self.dataExportApplet = ObjectClassificationDataExportApplet(self, "Object Information Export")
        self.dataExportApplet.set_exporting_operator(self.objectClassificationApplet.topLevelOperator)
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( self.dataSelectionApplet.topLevelOperator.WorkingDirectory )
        
        # See EXPORT_SELECTION_PREDICTIONS and EXPORT_SELECTION_PROBABILITIES, above
        opDataExport.SelectionNames.setValue( ['Object Predictions', 'Object Probabilities'] )        
        if self.input_types == 'raw':
            # Re-configure to add the pixel probabilities option
            # See EXPORT_SELECTION_PIXEL_PROBABILITIES, above
            opDataExport.SelectionNames.setValue( ['Object Predictions', 'Object Probabilities', 'Pixel Probabilities'] )

        self._applets.append(self.objectExtractionApplet)
        self._applets.append(self.objectClassificationApplet)
        self._applets.append(self.dataExportApplet)

        if self.batch:
            self.dataSelectionAppletBatch = DataSelectionApplet(
                    self, "Batch Inputs", "Batch Inputs", batchDataGui=True)
            self.opDataSelectionBatch = self.dataSelectionAppletBatch.topLevelOperator
            
            if self.input_types == 'raw':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data'])
            elif self.input_types == 'raw+binary':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data', 'Binary Data'])
            elif self.input_types == 'raw+pmaps':
                self.opDataSelectionBatch.DatasetRoles.setValue(['Raw Data', 'Prediction Maps'])
            else:
                assert False, "Unknown object classification subclass type."
    
            self.blockwiseObjectClassificationApplet = BlockwiseObjectClassificationApplet(
                self, "Blockwise Object Classification", "Blockwise Object Classification")
            self._applets.append(self.blockwiseObjectClassificationApplet)

            self.batchExportApplet = ObjectClassificationDataExportApplet(
                self, "Batch Object Prediction Export", isBatch=True)
        
            opBatchDataExport = self.batchExportApplet.topLevelOperator
            opBatchDataExport.WorkingDirectory.connect( self.dataSelectionApplet.topLevelOperator.WorkingDirectory )

            self._applets.append(self.dataSelectionAppletBatch)
            self._applets.append(self.batchExportApplet)

            self._initBatchWorkflow()

            self._batch_export_args = None
            self._batch_input_args = None
            if unused_args:
                # Additional export args (specific to the object classification workflow)
                export_arg_parser = argparse.ArgumentParser()
                export_arg_parser.add_argument( "--table_filename", help="The location to export the object feature/prediction CSV file.", required=False )
                export_arg_parser.add_argument( "--export_object_prediction_img", action="store_true" )
                export_arg_parser.add_argument( "--export_object_probability_img", action="store_true" )

                # TODO: Support this, too, someday?
                #export_arg_parser.add_argument( "--export_object_label_img", action="store_true" )
                
                if self.input_types == 'raw':
                    export_arg_parser.add_argument( "--export_pixel_probability_img", action="store_true" )
                self._export_args, unused_args = export_arg_parser.parse_known_args(unused_args)

                # We parse the export setting args first.  All remaining args are considered input files by the input applet.
                self._batch_export_args, unused_args = self.batchExportApplet.parse_known_cmdline_args( unused_args )
                self._batch_input_args, unused_args = self.dataSelectionAppletBatch.parse_known_cmdline_args( unused_args )

        if unused_args:
            warnings.warn("Unused command-line args: {}".format( unused_args ))
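
A hypothetical headless batch invocation exercising the export arguments defined above (paths illustrative; the --raw_data flag assumes DataSelectionApplet's usual role-name-to-flag derivation):

    python ilastik.py --headless --project=MyObjects.ilp \
                      --table_filename=/tmp/object_features.csv \
                      --export_object_prediction_img \
                      --raw_data my_raw.h5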
Example #46
class WsdtWorkflow(Workflow):
    workflowName = "Watershed Over Distance Transform"
    workflowDescription = "A bare-bones workflow for using the WSDT applet"
    defaultAppletIndex = 0 # show DataSelection by default

    DATA_ROLE_RAW = 0
    DATA_ROLE_PROBABILITIES = 1
    ROLE_NAMES = ['Raw Data', 'Probabilities']
    EXPORT_NAMES = ['Watershed']

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_workflow, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()

        super(WsdtWorkflow, self).__init__( shell, headless, workflow_cmdline_args, project_creation_workflow, graph=graph, *args, **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data", "Input Data")

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( self.ROLE_NAMES )

        # -- Wsdt applet
        #
        self.wsdtApplet = WsdtApplet(self, "Watershed", "Wsdt Watershed")

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( self.EXPORT_NAMES )

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(self,
                                                           "Batch Processing",
                                                           self.dataSelectionApplet,
                                                           self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, self.ROLE_NAMES )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))

    def connectLane(self, laneIndex):
        """
        Override from base class.
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opWsdt = self.wsdtApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        # watershed inputs
        opWsdt.RawData.connect( opDataSelection.ImageGroup[self.DATA_ROLE_RAW] )
        opWsdt.Input.connect( opDataSelection.ImageGroup[self.DATA_ROLE_PROBABILITIES] )

        # DataExport inputs
        opDataExport.RawData.connect( opDataSelection.ImageGroup[self.DATA_ROLE_RAW] )
        opDataExport.RawDatasetInfo.connect( opDataSelection.DatasetGroup[self.DATA_ROLE_RAW] )        
        opDataExport.Inputs.resize( len(self.EXPORT_NAMES) )
        opDataExport.Inputs[0].connect( opWsdt.Superpixels )
        for slot in opDataExport.Inputs:
            assert slot.partner is not None
        
    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args( self._data_export_args )

        if self._headless and self._batch_input_args and self._data_export_args:
            logger.info("Beginning Batch Processing")
            self.batchProcessingApplet.run_export_from_parsed_args(self._batch_input_args)
            logger.info("Completed Batch Processing")

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataExport = self.dataExportApplet.topLevelOperator
        opWsdt = self.wsdtApplet.topLevelOperator

        # If no data, nothing else is ready.
        input_ready = len(opDataSelection.ImageGroup) > 0 and not self.dataSelectionApplet.busy

        # The user isn't allowed to touch anything while batch processing is running.
        batch_processing_busy = self.batchProcessingApplet.busy

        self._shell.setAppletEnabled( self.dataSelectionApplet,   not batch_processing_busy )
        self._shell.setAppletEnabled( self.wsdtApplet,            not batch_processing_busy and input_ready )
        self._shell.setAppletEnabled( self.dataExportApplet,      not batch_processing_busy and input_ready and opWsdt.Superpixels.ready())
        self._shell.setAppletEnabled( self.batchProcessingApplet, not batch_processing_busy and input_ready )

        # Lastly, check for certain "busy" conditions, during which we
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.wsdtApplet.busy
        busy |= self.dataExportApplet.busy
        busy |= self.batchProcessingApplet.busy
        self._shell.enableProjectChanges( not busy )
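
A hypothetical headless run of this workflow, assuming the per-role input flags are derived from ROLE_NAMES (--raw_data, --probabilities; file names illustrative):

    python ilastik.py --headless --project=MyWsdt.ilp \
                      --raw_data my_raw.h5 \
                      --probabilities my_pmaps.h5 \
                      --output_format=hdf5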
Example #47
    def _initBatchWorkflow(self):
        """
        Connect the batch-mode top-level operators to the training workflow and to each other.
        """
        # Access applet operators from the training workflow
        opTrainingDataSelection = self.dataSelectionApplet.topLevelOperator
        opTrainingFeatures = self.featureSelectionApplet.topLevelOperator
        opClassify = self.countingApplet.topLevelOperator
                
        opSelectFirstLane = OperatorWrapper( OpSelectSubslot, parent=self )
        opSelectFirstLane.Inputs.connect( opTrainingDataSelection.ImageGroup )
        opSelectFirstLane.SubslotIndex.setValue(0)
        
        opSelectFirstRole = OpSelectSubslot( parent=self )
        opSelectFirstRole.Inputs.connect( opSelectFirstLane.Output )
        opSelectFirstRole.SubslotIndex.setValue(0)

        ## Create additional batch workflow operators
        opBatchFeatures = OperatorWrapper( OpFeatureSelection, operator_kwargs={'filter_implementation':'Original'}, parent=self, promotedSlotNames=['InputImage'] )
        opBatchPredictionPipeline = OperatorWrapper( OpPredictionPipeline, parent=self )
        
        # Create applets for batch workflow
        self.batchInputApplet = DataSelectionApplet(self, "Batch Prediction Input Selections", "BatchDataSelection", supportIlastik05Import=False, batchDataGui=True)
        self.batchResultsApplet = CountingDataExportApplet(self, "Batch Prediction Output Locations", opBatchPredictionPipeline, isBatch=True)

        # Expose in shell        
        self._applets.append(self.batchInputApplet)
        self._applets.append(self.batchResultsApplet)

        opBatchInputs = self.batchInputApplet.topLevelOperator
        opBatchResults = self.batchResultsApplet.topLevelOperator
        
        opBatchInputs.DatasetRoles.connect( opTrainingDataSelection.DatasetRoles )
        opBatchResults.ConstraintDataset.connect( opSelectFirstRole.Output )
        
        
        ## Connect Operators ##
        opTranspose = OpTransposeSlots( parent=self )
        opTranspose.OutputLength.setValue(1)
        opTranspose.Inputs.connect( opBatchInputs.DatasetGroup )
        
        # Provide dataset paths from data selection applet to the batch export applet
        opBatchResults.RawDatasetInfo.connect( opTranspose.Outputs[0] )
        opBatchResults.WorkingDirectory.connect( opBatchInputs.WorkingDirectory )
        
        # Connect (clone) the feature operator inputs from 
        #  the interactive workflow's features operator (which gets them from the GUI)
        opBatchFeatures.Scales.connect( opTrainingFeatures.Scales )
        opBatchFeatures.FeatureIds.connect( opTrainingFeatures.FeatureIds )
        opBatchFeatures.SelectionMatrix.connect( opTrainingFeatures.SelectionMatrix )
        
        # Classifier and LabelsCount are provided by the interactive workflow
        opBatchPredictionPipeline.Classifier.connect( opClassify.Classifier )
        opBatchPredictionPipeline.MaxLabel.connect( opClassify.MaxLabelValue )
        opBatchPredictionPipeline.FreezePredictions.setValue( False )
        
        # Provide these for the gui
        opBatchResults.RawData.connect( opBatchInputs.Image )
        opBatchResults.PmapColors.connect( opClassify.PmapColors )
        opBatchResults.LabelNames.connect( opClassify.LabelNames )
        opBatchResults.UpperBound.connect( opClassify.UpperBound )
        
        # Connect Image pathway:
        # Input Image -> Features Op -> Prediction Op -> Export
        opBatchFeatures.InputImage.connect( opBatchInputs.Image )
        opBatchPredictionPipeline.FeatureImages.connect( opBatchFeatures.OutputImage )
        
        opBatchResults.SelectionNames.setValue( ['Probabilities'] )        
        # opBatchResults.Inputs is indexed by [lane][selection],
        # Use OpTranspose to allow connection.
        opTransposeBatchInputs = OpTransposeSlots( parent=self )
        opTransposeBatchInputs.OutputLength.setValue(0)
        opTransposeBatchInputs.Inputs.resize(1)
        opTransposeBatchInputs.Inputs[0].connect( opBatchPredictionPipeline.HeadlessPredictionProbabilities ) # selection 0
        
        # Now opTransposeBatchInputs.Outputs is level-2 indexed by [lane][selection]
        opBatchResults.Inputs.connect( opTransposeBatchInputs.Outputs )

        # We don't actually need the cached path in the batch pipeline.
        # Just connect the uncached features here to satisfy the operator.
        opBatchPredictionPipeline.CachedFeatureImages.connect( opBatchFeatures.OutputImage )

        self.opBatchPredictionPipeline = opBatchPredictionPipeline
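
The [lane][selection] transpose used above, as a plain-Python analogy (lists standing in for level-2 slots; this is a sketch, not the lazyflow API):

    per_selection = [['lane0/prob', 'lane1/prob']]         # [selection][lane]
    per_lane = [list(col) for col in zip(*per_selection)]  # [lane][selection]
    assert per_lane == [['lane0/prob'], ['lane1/prob']]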
Example #48
class CountingWorkflow(Workflow):
    workflowName = "Cell Density Counting"
    workflowDescription = "This is obviously self-explanatory."
    defaultAppletIndex = 1 # show DataSelection by default

    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, appendBatchOperators=True, *args, **kwargs):
        # Use the caller-supplied graph if one was given; otherwise create our own.
        graph = kwargs.pop('graph', Graph())
        super( CountingWorkflow, self ).__init__( shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs )

        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument("--csv-export-file", help="Instead of exporting prediction density images, export total counts to the given csv path.")
        self.parsed_counting_workflow_args, unused_args = parser.parse_known_args(workflow_cmdline_args)

        ######################
        # Interactive workflow
        ######################

        self.projectMetadataApplet = ProjectMetadataApplet()

        self.dataSelectionApplet = DataSelectionApplet(self,
                                                       "Input Data",
                                                       "Input Data",
                                                       batchDataGui=False,
                                                       force5d=False
                                                      )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        role_names = ['Raw Data']
        opDataSelection.DatasetRoles.setValue( role_names )

        self.featureSelectionApplet = FeatureSelectionApplet(self,
                                                             "Feature Selection",
                                                             "FeatureSelections")

        #self.pcApplet = PixelClassificationApplet(self, "PixelClassification")
        self.countingApplet = CountingApplet(workflow=self)
        opCounting = self.countingApplet.topLevelOperator

        self.dataExportApplet = CountingDataExportApplet(self, "Density Export", opCounting)
        
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.PmapColors.connect(opCounting.PmapColors)
        opDataExport.LabelNames.connect(opCounting.LabelNames)
        opDataExport.UpperBound.connect(opCounting.UpperBound)
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue( ['Probabilities'] )        

        self._applets = []
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.featureSelectionApplet)
        self._applets.append(self.countingApplet)
        self._applets.append(self.dataExportApplet)


        self._batch_input_args = None
        self._batch_export_args = None

        self.batchInputApplet = None
        self.batchResultsApplet = None

        if appendBatchOperators:
            # Connect batch workflow (NOT lane-based)
            self._initBatchWorkflow()
            if unused_args:
                # We parse the export setting args first.
                # All remaining args are considered input files by the input applet.
                self._batch_export_args, unused_args = self.batchResultsApplet.parse_known_cmdline_args( unused_args )
                self._batch_input_args, unused_args = self.batchInputApplet.parse_known_cmdline_args( unused_args, role_names )

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def connectLane(self, laneIndex):
        ## Access applet operators
        opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opTrainingFeatures = self.featureSelectionApplet.topLevelOperator.getLane(laneIndex)
        opCounting = self.countingApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)


        #### connect input image
        opTrainingFeatures.InputImage.connect(opData.Image)

        opCounting.InputImages.connect(opData.Image)
        opCounting.FeatureImages.connect(opTrainingFeatures.OutputImage)
        opCounting.LabelsAllowedFlags.connect(opData.AllowLabels)
        opCounting.CachedFeatureImages.connect( opTrainingFeatures.CachedOutputImage )
        #opCounting.UserLabels.connect(opClassify.LabelImages)
        #opCounting.ForegroundLabels.connect(opObjExtraction.LabelImage)
        opDataExport.Inputs.resize(1)
        opDataExport.Inputs[0].connect( opCounting.HeadlessPredictionProbabilities )
        opDataExport.RawData.connect( opData.ImageGroup[0] )
        opDataExport.RawDatasetInfo.connect( opData.DatasetGroup[0] )
        opDataExport.ConstraintDataset.connect( opData.ImageGroup[0] )

    def _initBatchWorkflow(self):
        """
        Connect the batch-mode top-level operators to the training workflow and to each other.
        """
        # Access applet operators from the training workflow
        opTrainingDataSelection = self.dataSelectionApplet.topLevelOperator
        opTrainingFeatures = self.featureSelectionApplet.topLevelOperator
        opClassify = self.countingApplet.topLevelOperator
                
        opSelectFirstLane = OperatorWrapper( OpSelectSubslot, parent=self )
        opSelectFirstLane.Inputs.connect( opTrainingDataSelection.ImageGroup )
        opSelectFirstLane.SubslotIndex.setValue(0)
        
        opSelectFirstRole = OpSelectSubslot( parent=self )
        opSelectFirstRole.Inputs.connect( opSelectFirstLane.Output )
        opSelectFirstRole.SubslotIndex.setValue(0)

        ## Create additional batch workflow operators
        opBatchFeatures = OperatorWrapper( OpFeatureSelection, operator_kwargs={'filter_implementation':'Original'}, parent=self, promotedSlotNames=['InputImage'] )
        opBatchPredictionPipeline = OperatorWrapper( OpPredictionPipeline, parent=self )
        
        # Create applets for batch workflow
        self.batchInputApplet = DataSelectionApplet(self, "Batch Prediction Input Selections", "BatchDataSelection", supportIlastik05Import=False, batchDataGui=True)
        self.batchResultsApplet = CountingDataExportApplet(self, "Batch Prediction Output Locations", opBatchPredictionPipeline, isBatch=True)

        # Expose in shell        
        self._applets.append(self.batchInputApplet)
        self._applets.append(self.batchResultsApplet)

        opBatchInputs = self.batchInputApplet.topLevelOperator
        opBatchResults = self.batchResultsApplet.topLevelOperator
        
        opBatchInputs.DatasetRoles.connect( opTrainingDataSelection.DatasetRoles )
        opBatchResults.ConstraintDataset.connect( opSelectFirstRole.Output )
        
        
        ## Connect Operators ##
        opTranspose = OpTransposeSlots( parent=self )
        opTranspose.OutputLength.setValue(1)
        opTranspose.Inputs.connect( opBatchInputs.DatasetGroup )
        
        # Provide dataset paths from data selection applet to the batch export applet
        opBatchResults.RawDatasetInfo.connect( opTranspose.Outputs[0] )
        opBatchResults.WorkingDirectory.connect( opBatchInputs.WorkingDirectory )
        
        # Connect (clone) the feature operator inputs from 
        #  the interactive workflow's features operator (which gets them from the GUI)
        opBatchFeatures.Scales.connect( opTrainingFeatures.Scales )
        opBatchFeatures.FeatureIds.connect( opTrainingFeatures.FeatureIds )
        opBatchFeatures.SelectionMatrix.connect( opTrainingFeatures.SelectionMatrix )
        
        # Classifier and LabelsCount are provided by the interactive workflow
        opBatchPredictionPipeline.Classifier.connect( opClassify.Classifier )
        opBatchPredictionPipeline.MaxLabel.connect( opClassify.MaxLabelValue )
        opBatchPredictionPipeline.FreezePredictions.setValue( False )
        
        # Provide these for the gui
        opBatchResults.RawData.connect( opBatchInputs.Image )
        opBatchResults.PmapColors.connect( opClassify.PmapColors )
        opBatchResults.LabelNames.connect( opClassify.LabelNames )
        opBatchResults.UpperBound.connect( opClassify.UpperBound )
        
        # Connect Image pathway:
        # Input Image -> Features Op -> Prediction Op -> Export
        opBatchFeatures.InputImage.connect( opBatchInputs.Image )
        opBatchPredictionPipeline.FeatureImages.connect( opBatchFeatures.OutputImage )
        
        opBatchResults.SelectionNames.setValue( ['Probabilities'] )        
        # opBatchResults.Inputs is indexed by [lane][selection],
        # Use OpTranspose to allow connection.
        opTransposeBatchInputs = OpTransposeSlots( parent=self )
        opTransposeBatchInputs.OutputLength.setValue(0)
        opTransposeBatchInputs.Inputs.resize(1)
        opTransposeBatchInputs.Inputs[0].connect( opBatchPredictionPipeline.HeadlessPredictionProbabilities ) # selection 0
        
        # Now opTransposeBatchInputs.Outputs is level-2 indexed by [lane][selection]
        opBatchResults.Inputs.connect( opTransposeBatchInputs.Outputs )

        # We don't actually need the cached path in the batch pipeline.
        # Just connect the uncached features here to satisfy the operator.
        opBatchPredictionPipeline.CachedFeatureImages.connect( opBatchFeatures.OutputImage )

        self.opBatchPredictionPipeline = opBatchPredictionPipeline

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow for batch mode and export all results.
        (This workflow's headless mode supports only batch mode for now.)
        """
        # Configure the batch data selection operator.
        if self._batch_input_args and (self._batch_input_args.input_files or self._batch_input_args.raw_data):
            self.batchInputApplet.configure_operator_with_parsed_args( self._batch_input_args )
        
        # Configure the data export operator.
        if self._batch_export_args:
            self.batchResultsApplet.configure_operator_with_parsed_args( self._batch_export_args )

        if self._batch_input_args and self.countingApplet.topLevelOperator.classifier_cache._dirty:
            logger.warning("Your project file has no classifier.  "
                           "A new classifier will be trained for this run.")

        if self._headless:
            # In headless mode, let's see the messages from the training operator.
            logging.getLogger("lazyflow.operators.classifierOperators").setLevel(logging.DEBUG)
        
        if self._headless and self._batch_input_args and self._batch_export_args:
            # Make sure we're using the up-to-date classifier.
            self.countingApplet.topLevelOperator.FreezePredictions.setValue(False)

            csv_path = self.parsed_counting_workflow_args.csv_export_file
            if csv_path:
                logger.info( "Exporting Object Counts to {}".format(csv_path) )
                sys.stdout.write("Progress: ")
                sys.stdout.flush()
                def print_progress( progress ):
                    sys.stdout.write( "{:.1f} ".format( progress ) )
                    sys.stdout.flush()

                self.batchResultsApplet.progressSignal.connect(print_progress)
                req = self.batchResultsApplet.prepareExportObjectCountsToCsv( csv_path )
                req.wait()

                # Finished.
                sys.stdout.write("\n")
                sys.stdout.flush()
            else:
                # Now run the batch export and report progress....
                opBatchDataExport = self.batchResultsApplet.topLevelOperator
                for i, opExportDataLaneView in enumerate(opBatchDataExport):
                    logger.info( "Exporting object density image {} to {}".format(i, opExportDataLaneView.ExportPath.value) )
        
                    sys.stdout.write( "Result {}/{} Progress: ".format( i, len( opBatchDataExport ) ) )
                    sys.stdout.flush()
                    def print_progress( progress ):
                        sys.stdout.write( "{:.1f} ".format( progress ) )
                        sys.stdout.flush()
        
                    # If the operator provides a progress signal, use it.
                    slotProgressSignal = opExportDataLaneView.progressSignal
                    slotProgressSignal.subscribe( print_progress )
                    opExportDataLaneView.run_export()
                    
                    # Finished.
                    sys.stdout.write("\n")

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.statusUpdateSignal`
        """
        # If no data, nothing else is ready.
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0 and not self.dataSelectionApplet.busy

        opFeatureSelection = self.featureSelectionApplet.topLevelOperator
        featureOutput = opFeatureSelection.OutputImage
        features_ready = input_ready and \
                         len(featureOutput) > 0 and  \
                         featureOutput[0].ready() and \
                         (TinyVector(featureOutput[0].meta.shape) > 0).all()

        opDataExport = self.dataExportApplet.topLevelOperator
        predictions_ready = features_ready and \
                            len(opDataExport.Inputs) > 0 and \
                            opDataExport.Inputs[0][0].ready() and \
                            (TinyVector(opDataExport.Inputs[0][0].meta.shape) > 0).all()

        self._shell.setAppletEnabled(self.featureSelectionApplet, input_ready)
        self._shell.setAppletEnabled(self.countingApplet, features_ready)
        self._shell.setAppletEnabled(self.dataExportApplet, predictions_ready and not self.dataExportApplet.busy)
        
        # Training workflow must be fully configured before batch can be used
        self._shell.setAppletEnabled(self.batchInputApplet, predictions_ready)

        opBatchDataSelection = self.batchInputApplet.topLevelOperator
        batch_input_ready = predictions_ready and \
                            len(opBatchDataSelection.ImageGroup) > 0
        self._shell.setAppletEnabled(self.batchResultsApplet, batch_input_ready)
        
        # Lastly, check for certain "busy" conditions, during which we 
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.featureSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        self._shell.enableProjectChanges( not busy )
Example #49
class CarvingWorkflow(Workflow):
    
    workflowName = "Carving"
    defaultAppletIndex = 1 # show DataSelection by default
    
    @property
    def applets(self):
        return self._applets
    
    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, hintoverlayFile=None, pmapoverlayFile=None, *args, **kwargs):
        if hintoverlayFile is not None:
            assert isinstance(hintoverlayFile, str), "hintoverlayFile should be a string, not '%s'" % type(hintoverlayFile)
        if pmapoverlayFile is not None:
            assert isinstance(pmapoverlayFile, str), "pmapoverlayFile should be a string, not '%s'" % type(pmapoverlayFile)

        graph = Graph()
        
        super(CarvingWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self.workflow_cmdline_args = workflow_cmdline_args
        
        data_instructions = "Select your input data using the 'Raw Data' tab shown on the right"
        
        ## Create applets 
        self.projectMetadataApplet = ProjectMetadataApplet()
        self.dataSelectionApplet = DataSelectionApplet( self,
                                                        "Input Data",
                                                        "Input Data",
                                                        supportIlastik05Import=True,
                                                        batchDataGui=False,
                                                        instructionText=data_instructions,
                                                        max_lanes=1 )
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( DATA_ROLES )
        
        self.preprocessingApplet = PreprocessingApplet(workflow=self,
                                           title = "Preprocessing",
                                           projectFileGroupName="preprocessing")
        
        self.carvingApplet = CarvingApplet(workflow=self,
                                           projectFileGroupName="carving",
                                           hintOverlayFile=hintoverlayFile,
                                           pmapOverlayFile=pmapoverlayFile)
        
        #self.carvingApplet.topLevelOperator.MST.connect(self.preprocessingApplet.topLevelOperator.PreprocessedData)
        
        # Expose to shell
        self._applets = []
        self._applets.append(self.projectMetadataApplet)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.preprocessingApplet)
        self._applets.append(self.carvingApplet)

    def connectLane(self, laneIndex):
        ## Access applet operators
        opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opPreprocessing = self.preprocessingApplet.topLevelOperator.getLane(laneIndex)
        opCarvingLane = self.carvingApplet.topLevelOperator.getLane(laneIndex)
        
        opCarvingLane.connectToPreprocessingApplet(self.preprocessingApplet)
        op5 = OpReorderAxes(parent=self)
        op5.AxisOrder.setValue("txyzc")
        op5.Input.connect(opData.Image)

        ## Connect operators
        opPreprocessing.InputData.connect(op5.Output)
        #opCarvingTopLevel.RawData.connect(op5.output)
        opCarvingLane.InputData.connect(op5.Output)
        opCarvingLane.FilteredInputData.connect(opPreprocessing.FilteredImage)
        opCarvingLane.MST.connect(opPreprocessing.PreprocessedData)
        opCarvingLane.UncertaintyType.setValue("none")
        
        # Special input-input connection: WriteSeeds metadata must mirror the input data
        opCarvingLane.WriteSeeds.connect( opCarvingLane.InputData )
        
        self.preprocessingApplet.enableDownstream(False)

    def handleAppletStateUpdateRequested(self):
        # If no data, nothing else is ready.
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0

        # If preprocessing isn't configured yet, don't allow carving
        preprocessed_data_ready = input_ready and self.preprocessingApplet._enabledDS
        
        # Enable each applet as appropriate
        self._shell.setAppletEnabled(self.preprocessingApplet, input_ready)
        self._shell.setAppletEnabled(self.carvingApplet, preprocessed_data_ready)

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.

        If the user provided command-line arguments, apply them to the workflow operators.
        Currently, we support command-line configuration of:
        - DataSelection
        - Preprocessing, in which case preprocessing is immediately executed
        """
        # If input data files were provided on the command line, configure the DataSelection applet now.
        # (Otherwise, we assume the project already had a dataset selected.)
        input_data_args, unused_args = DataSelectionApplet.parse_known_cmdline_args(self.workflow_cmdline_args, DATA_ROLES)
        if input_data_args.raw_data:
            self.dataSelectionApplet.configure_operator_with_parsed_args(input_data_args)

        #
        # Parse the remaining cmd-line arguments
        #
        filter_indexes = { 'bright-lines' : OpFilter.HESSIAN_BRIGHT,
                           'dark-lines'   : OpFilter.HESSIAN_DARK,
                           'step-edges'   : OpFilter.STEP_EDGES,
                           'original'     : OpFilter.RAW,
                           'inverted'     : OpFilter.RAW_INVERTED }

        parser = argparse.ArgumentParser()
        parser.add_argument('--run-preprocessing', action='store_true')
        parser.add_argument('--preprocessing-sigma', type=float, required=False)
        parser.add_argument('--preprocessing-filter', required=False, type=str.lower,
                            choices=filter_indexes.keys())

        parsed_args, unused_args = parser.parse_known_args(unused_args)
        if unused_args:
            logger.warning("Did not use the following command-line arguments: {}".format(unused_args))

        # Execute pre-processing.
        if parsed_args.run_preprocessing:
            if len(self.preprocessingApplet.topLevelOperator) != 1:
                raise RuntimeError("Can't run preprocessing on a project with no images.")

            opPreprocessing = self.preprocessingApplet.topLevelOperator.getLane(0) # Carving has only one 'lane'

            # If user provided parameters, override the defaults.
            if parsed_args.preprocessing_sigma is not None:
                opPreprocessing.Sigma.setValue(parsed_args.preprocessing_sigma)

            if parsed_args.preprocessing_filter:
                filter_index = filter_indexes[parsed_args.preprocessing_filter]
                opPreprocessing.Filter.setValue(filter_index)

            logger.info("Running Preprocessing...")
            opPreprocessing.PreprocessedData[:].wait()
            logger.info("FINISHED Preprocessing...")

            logger.info("Saving project...")
            self._shell.projectManager.saveProject()
            logger.info("Done saving.")
Example #50
    def parse_known_cmdline_args(self, cmdline_args):
        # We use the same parser as the DataSelectionApplet
        role_names = self.dataSelectionApplet.topLevelOperator.DatasetRoles.value
        parsed_args, unused_args = DataSelectionApplet.parse_known_cmdline_args(cmdline_args, role_names)
        return parsed_args, unused_args
Example #51
class DataConversionWorkflow(Workflow):
    """
    Simple workflow for converting data between formats.
    Has only two 'interactive' applets (Data Selection and Data Export), plus the BatchProcessing applet.    

    Supports headless mode. For example:
    
    .. code-block:: bash

        python ilastik.py --headless \
                          --new_project=NewTemporaryProject.ilp \
                          --workflow=DataConversionWorkflow \
                          --output_format="png sequence" \
                          ~/input1.h5 \
                          ~/input2.h5

    .. note:: Beware of issues related to absolute vs. relative paths.
              Relative links are stored relative to the project file.

              To avoid this issue entirely, either 
                 (1) use only absolute filepaths
              or (2) cd into your project file's directory before launching ilastik.
    
    """
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self._applets = []

        # Instantiate DataSelection applet
        self.dataSelectionApplet = DataSelectionApplet(self, 
                                                       "Input Data", 
                                                       "Input Data", 
                                                       supportIlastik05Import=True)

        # Configure global DataSelection settings
        role_names = ["Input Data"]
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( role_names )

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( ["Input"] )        

        # No special data pre/post processing necessary in this workflow, 
        #   but this is where we'd hook it up if we needed it.
        #
        #self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        #self.dataExportApplet.prepare_lane_for_export = self.prepare_lane_for_export
        #self.dataExportApplet.post_process_lane_export = self.post_process_lane_export
        #self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Instantiate BatchProcessing applet
        self.batchProcessingApplet = BatchProcessingApplet(self, 
                                                           "Batch Processing", 
                                                           self.dataSelectionApplet, 
                                                           self.dataExportApplet)

        # Expose our applets in a list (for the shell to use)
        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.dataExportApplet )
        self._applets.append(self.batchProcessingApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, role_names )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))

    @property
    def applets(self):
        """
        Overridden from Workflow base class.
        """
        return self._applets

    @property
    def imageNameListSlot(self):
        """
        Overridden from Workflow base class.
        """
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def prepareForNewLane(self, laneIndex):
        """
        Overridden from Workflow base class.
        Called immediately before connectLane()
        """
        # No preparation necessary.
        pass

    def connectLane(self, laneIndex):
        """
        Overridden from Workflow base class.
        """
        # Get a *view* of each top-level operator, specific to the current lane.
        opDataSelectionView = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opDataExportView = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        # Now connect the operators together for this lane.
        # Most workflows would have more to do here, but this workflow is super simple:
        # We just connect input to export
        opDataExportView.RawDatasetInfo.connect( opDataSelectionView.DatasetGroup[RAW_DATA_ROLE_INDEX] )        
        opDataExportView.Inputs.resize( 1 )
        opDataExportView.Inputs[RAW_DATA_ROLE_INDEX].connect( opDataSelectionView.ImageGroup[RAW_DATA_ROLE_INDEX] )

        # There is no special "raw" display layer in this workflow.
        #opDataExportView.RawData.connect( opDataSelectionView.ImageGroup[0] )

    def handleNewLanesAdded(self):
        """
        Overridden from Workflow base class.
        Called immediately AFTER connectLane() and the dataset is loaded into the workflow.
        """
        # No special handling required.
        pass

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args( self._data_export_args )

        if self._headless and self._batch_input_args and self._data_export_args:
            logger.info("Beginning Batch Processing")
            self.batchProcessingApplet.run_export_from_parsed_args(self._batch_input_args)
            logger.info("Completed Batch Processing")

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.statusUpdateSignal`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0

        opDataExport = self.dataExportApplet.topLevelOperator
        export_data_ready = input_ready and \
                            len(opDataExport.Inputs[0]) > 0 and \
                            opDataExport.Inputs[0][0].ready() and \
                            (TinyVector(opDataExport.Inputs[0][0].meta.shape) > 0).all()

        self._shell.setAppletEnabled(self.dataSelectionApplet, not self.batchProcessingApplet.busy)
        self._shell.setAppletEnabled(self.dataExportApplet, export_data_ready and not self.batchProcessingApplet.busy)
        self._shell.setAppletEnabled(self.batchProcessingApplet, export_data_ready)
        
        # Lastly, check for certain "busy" conditions, during which we 
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        busy |= self.batchProcessingApplet.busy
        self._shell.enableProjectChanges( not busy )
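
The TinyVector shape guard above only ensures that no axis of the export input has zero extent. A plain-numpy analogy (a sketch; TinyVector behaves like a small numeric vector here):

    import numpy as np

    # Exportable only if every axis has non-zero extent.
    assert (np.array((512, 512, 1)) > 0).all()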