def parse_known_cmdline_args(cls, cmdline_args, parsed_args=None):
        """
        Helper function for headless workflows.
        Parses command-line args that can be used to configure the ``TrackingBaseDataExportApplet`` top-level operator
        as well as its parent, the ``DataExportApplet``,
        and returns ``(parsed_args, unused_args)``, similar to ``argparse.ArgumentParser.parse_known_args()``.
        See also: :py:meth:`configure_operator_with_parsed_args()`.

        parsed_args: Already-parsed args, as returned by an ArgumentParser created with make_cmdline_parser(), above.
                     If not provided, make_cmdline_parser().parse_known_args() will be used.
        """
        unused_args = []
        if parsed_args is None:
            arg_parser = cls.make_cmdline_parser()
            parsed_args, unused_args = arg_parser.parse_known_args(cmdline_args)

        msg = "Error parsing command-line arguments for tracking data export applet.\n"
        if parsed_args.export_plugin is not None:
            if parsed_args.export_source is None or parsed_args.export_source.lower() != "plugin":
                msg += "export_plugin should only be specified if export_source is set to Plugin."
                raise Exception(msg)

        if parsed_args.export_source is not None and parsed_args.export_source.lower() == "plugin" and parsed_args.export_plugin is None:
            msg += "export_plugin MUST be specified if export_source is set to Plugin!"
            raise Exception(msg)

        if parsed_args.export_plugin == 'Fiji-MaMuT':
            if parsed_args.big_data_viewer_xml_file is None:
                msg += "'big_data_viewer_xml_file' MUST be specified if 'export_plugin' is set to 'Fiji-MaMuT'"
                raise Exception(msg)

        # configure parent applet
        DataExportApplet.parse_known_cmdline_args(cmdline_args, parsed_args)

        return parsed_args, unused_args
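
For headless use, a call like the following exercises the validation above (a sketch: the applet class matches the snippet's context, but the plugin name and the extra flag are illustrative only):

    cmdline = ["--export_source=Plugin", "--export_plugin=CSV-Table", "--some_unknown_flag"]
    parsed_args, unused_args = TrackingBaseDataExportApplet.parse_known_cmdline_args(cmdline)
    # unused_args still holds "--some_unknown_flag"; parsed_args is ready for
    # configure_operator_with_parsed_args().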
Example #2
def convert_predictions_to_uncertainties( input_path, parsed_export_args ):
    """
    Read exported pixel predictions and calculate/export the uncertainties.
    
    input_path: The path to the prediction output file. If hdf5, must include the internal dataset name.
    parsed_export_args: The already-parsed cmd-line arguments generated from a DataExportApplet-compatible ArgumentParser.
    """
    graph = Graph()
    opReader = OpInputDataReader(graph=graph)
    opReader.WorkingDirectory.setValue( os.getcwd() )
    opReader.FilePath.setValue(input_path)
    
    opUncertainty = OpEnsembleMargin( graph=graph )
    opUncertainty.Input.connect( opReader.Output )
        
    opExport = OpFormattedDataExport( graph=graph )
    opExport.Input.connect( opUncertainty.Output )

    # Apply command-line arguments.
    DataExportApplet._configure_operator_with_parsed_args(parsed_export_args, opExport)

    last_progress = [-1]
    def print_progress(progress_percent):
        if progress_percent != last_progress[0]:
            last_progress[0] = progress_percent
            sys.stdout.write( " {}".format(progress_percent) )
    
    print "Exporting results to : {}".format( opExport.ExportPath.value )    
    sys.stdout.write("Progress:")
    opExport.progressSignal.subscribe(print_progress)

    # Begin export
    opExport.run_export()
    sys.stdout.write("\n")
    print "DONE."
    def _configure_operator_with_parsed_args(cls, parsed_args, opTrackingDataExport):
        """
        Helper function for headless workflows.
        Configures the given export operator according to the settings provided in ``parsed_args``,
        and depending on the chosen export source it also configures the parent operator opDataExport

        :param parsed_args: Must be an ``argparse.Namespace`` as returned by :py:meth:`parse_known_cmdline_args()`.
        """
        if parsed_args.export_source is not None:
            opTrackingDataExport.SelectedExportSource.setValue(parsed_args.export_source)

            if parsed_args.export_source == OpTrackingBaseDataExport.PluginOnlyName:
                opTrackingDataExport.SelectedPlugin.setValue(parsed_args.export_plugin)

                # if a plugin was selected, the only thing we need is the export name
                if parsed_args.output_filename_format:
                    if hasattr(opTrackingDataExport, 'WorkingDirectory'):
                        # By default, most workflows consider the project directory to be the 'working directory'
                        #  for transforming relative paths (e.g. export locations) into absolute locations.
                        # A user would probably expect paths to be relative to their cwd when they launch
                        #  ilastik from the command line.
                        opTrackingDataExport.WorkingDirectory.disconnect()
                        opTrackingDataExport.WorkingDirectory.setValue(os.getcwd())

                    opTrackingDataExport.OutputFilenameFormat.setValue(parsed_args.output_filename_format)

                return # We don't want to configure the super operator so we quit now!
            else:
                # set some value to the SelectedPlugin slot so that it is ready
                opTrackingDataExport.SelectedPlugin.setValue("None")

        # configure super operator
        DataExportApplet._configure_operator_with_parsed_args(parsed_args, opTrackingDataExport)
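
The two helpers are meant to be used as a pair; a minimal sketch, assuming opTrackingDataExport is the applet's already-constructed top-level operator:

    parsed_args, unused_args = TrackingBaseDataExportApplet.parse_known_cmdline_args(sys.argv[1:])
    TrackingBaseDataExportApplet._configure_operator_with_parsed_args(parsed_args, opTrackingDataExport)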
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(
            shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs
        )
        self._applets = []

        # Instantiate DataSelection applet
        self.dataSelectionApplet = DataSelectionApplet(
            self, "Input Data", "Input Data", supportIlastik05Import=True, forceAxisOrder=None
        )

        # Configure global DataSelection settings
        role_names = ["Input Data"]
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(role_names)

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(["Input"])

        # No special data pre/post processing necessary in this workflow,
        #   but this is where we'd hook it up if we needed it.
        #
        # self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        # self.dataExportApplet.prepare_lane_for_export = self.prepare_lane_for_export
        # self.dataExportApplet.post_process_lane_export = self.post_process_lane_export
        # self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Instantiate BatchProcessing applet
        self.batchProcessingApplet = BatchProcessingApplet(
            self, "Batch Processing", self.dataSelectionApplet, self.dataExportApplet
        )

        # Expose our applets in a list (for the shell to use)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(workflow_cmdline_args)
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args(
                unused_args, role_names
            )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format(unused_args))
def convert_predictions_to_segmentation(input_paths, parsed_export_args):
    """
    Read exported pixel predictions and calculate/export the segmentation.

    input_paths: Paths to the prediction output files. If hdf5, each path must include the internal dataset name.
    parsed_export_args: The already-parsed cmd-line arguments generated from a DataExportApplet-compatible ArgumentParser.
    """
    graph = Graph()
    opReader = OpInputDataReader(graph=graph)
    opReader.WorkingDirectory.setValue(os.getcwd())

    opArgmaxChannel = OpArgmaxChannel(graph=graph)
    opArgmaxChannel.Input.connect(opReader.Output)

    opExport = OpFormattedDataExport(graph=graph)
    opExport.Input.connect(opArgmaxChannel.Output)

    # Apply command-line arguments.
    DataExportApplet._configure_operator_with_parsed_args(
        parsed_export_args, opExport)

    last_progress = [-1]

    def print_progress(progress_percent):
        if progress_percent != last_progress[0]:
            last_progress[0] = progress_percent
            sys.stdout.write(" {}".format(progress_percent))

    opExport.progressSignal.subscribe(print_progress)

    for input_path in input_paths:
        opReader.FilePath.setValue(input_path)

        input_pathcomp = PathComponents(input_path)
        opExport.OutputFilenameFormat.setValue(str(
            input_pathcomp.externalPath))

        output_path = opExport.ExportPath.value
        output_pathcomp = PathComponents(output_path)
        output_pathcomp.filenameBase += "_Segmentation"
        opExport.OutputFilenameFormat.setValue(
            str(output_pathcomp.externalPath))

        print("Exporting results to : {}".format(opExport.ExportPath.value))
        sys.stdout.write("Progress:")
        # Begin export
        opExport.run_export()
        sys.stdout.write("\n")
    print("DONE.")
    def _configure_operator_with_parsed_args(cls, parsed_args,
                                             opTrackingDataExport):
        """
        Helper function for headless workflows.
        Configures the given export operator according to the settings provided in ``parsed_args``,
        and depending on the chosen export source it also configures the parent operator opDataExport

        :param parsed_args: Must be an ``argparse.Namespace`` as returned by :py:meth:`parse_known_cmdline_args()`.
        """
        if parsed_args.export_source is not None:
            opTrackingDataExport.SelectedExportSource.setValue(
                parsed_args.export_source)

            if parsed_args.export_source == OpTrackingBaseDataExport.PluginOnlyName:
                opTrackingDataExport.SelectedPlugin.setValue(
                    parsed_args.export_plugin)
                if parsed_args.export_plugin == "Fiji-MaMuT":
                    if opTrackingDataExport.AdditionalPluginArguments.ready():
                        additional_plugin_args = opTrackingDataExport.AdditionalPluginArguments.value
                    else:
                        additional_plugin_args = {}
                    additional_plugin_args[
                        "bdvFilepath"] = parsed_args.big_data_viewer_xml_file
                    opTrackingDataExport.AdditionalPluginArguments.setValue(
                        additional_plugin_args)

                # if a plugin was selected, the only thing we need is the export name
                if parsed_args.output_filename_format:
                    if hasattr(opTrackingDataExport, "WorkingDirectory"):
                        # By default, most workflows consider the project directory to be the 'working directory'
                        #  for transforming relative paths (e.g. export locations) into absolute locations.
                        # A user would probably expect paths to be relative to their cwd when they launch
                        #  ilastik from the command line.
                        opTrackingDataExport.WorkingDirectory.disconnect()
                        opTrackingDataExport.WorkingDirectory.setValue(
                            os.getcwd())

                    opTrackingDataExport.OutputFilenameFormat.setValue(
                        parsed_args.output_filename_format)

                return  # We don't want to configure the super operator so we quit now!
            else:
                # set some value to the SelectedPlugin slot so that it is ready
                opTrackingDataExport.SelectedPlugin.setValue("None")

        # configure super operator
        DataExportApplet._configure_operator_with_parsed_args(
            parsed_args, opTrackingDataExport)
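
A sketch of the Fiji-MaMuT branch above, assuming OpTrackingBaseDataExport.PluginOnlyName is the string "Plugin" and using a placeholder XML path:

    cmdline = [
        "--export_source=Plugin",
        "--export_plugin=Fiji-MaMuT",
        "--big_data_viewer_xml_file=/tmp/dataset_bdv.xml",
    ]
    parsed_args, _ = TrackingBaseDataExportApplet.parse_known_cmdline_args(cmdline)
    TrackingBaseDataExportApplet._configure_operator_with_parsed_args(parsed_args, opTrackingDataExport)
    # Afterwards, AdditionalPluginArguments.value["bdvFilepath"] holds the XML path.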
Example #7
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell,
                                                     headless,
                                                     workflow_cmdline_args,
                                                     project_creation_args,
                                                     graph=graph,
                                                     *args,
                                                     **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False,
            force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        role_names = ["Input Data"]
        opDataSelection.DatasetRoles.setValue(role_names)

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(["Input"])

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.dataExportApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        self._workflow_cmdline_args = workflow_cmdline_args
        self._data_input_args = None
        self._data_export_args = None
        if workflow_cmdline_args:
            self._data_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args(
                workflow_cmdline_args, role_names)
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(
                unused_args)
            if unused_args:
                logger.warning("Unused command-line args: {}".format(unused_args))
Example #8
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(LayerViewerWorkflow, self).__init__(shell,
                                                  headless,
                                                  workflow_cmdline_args,
                                                  project_creation_args,
                                                  graph=graph,
                                                  *args,
                                                  **kwargs)
        self._applets = []

        # Roles

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data",
                                                       "Input Data")
        self.viewerApplet = LayerViewerApplet(self)
        self.dataExportApplet = DataExportApplet(self, "Data Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.SelectionNames.setValue(ROLES)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(ROLES)

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.viewerApplet)
        self._applets.append(self.dataExportApplet)

        self._workflow_cmdline_args = workflow_cmdline_args
Example #9
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self._applets = []

        # Instantiate DataSelection applet
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            forceAxisOrder=None)

        # Configure global DataSelection settings
        role_names = ["Input Data"]
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( role_names )

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( ["Input"] )        

        # No special data pre/post processing necessary in this workflow, 
        #   but this is where we'd hook it up if we needed it.
        #
        #self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        #self.dataExportApplet.prepare_lane_for_export = self.prepare_lane_for_export
        #self.dataExportApplet.post_process_lane_export = self.post_process_lane_export
        #self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Instantiate BatchProcessing applet
        self.batchProcessingApplet = BatchProcessingApplet(self, 
                                                           "Batch Processing", 
                                                           self.dataSelectionApplet, 
                                                           self.dataExportApplet)

        # Expose our applets in a list (for the shell to use)
        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.dataExportApplet )
        self._applets.append(self.batchProcessingApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, role_names )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))
Example #10
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(LayerViewerWorkflow, self).__init__(shell,
                                                  headless,
                                                  workflow_cmdline_args,
                                                  project_creation_args,
                                                  graph=graph,
                                                  *args,
                                                  **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            forceAxisOrder='tzyxc')
        self.viewerApplet = LayerViewerApplet(self)
        self.dataExportApplet = DataExportApplet(self, "Data Export")
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.SelectionNames.setValue(['Raw Data', 'Other Data'])

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Raw Data", "Other Data"])

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.viewerApplet)
        self._applets.append(self.dataExportApplet)

        self._workflow_cmdline_args = workflow_cmdline_args
    def __init__(self, headless, workflow_cmdline_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(headless,
                                                     graph=graph,
                                                     *args,
                                                     **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False,
            force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Input Data"])

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.dataExportApplet)

        self._workflow_cmdline_args = workflow_cmdline_args
Example #12
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args):
        # Create a graph to be shared by all operators
        graph = Graph()
        super(ThresholdMaskingWorkflow, self).__init__(shell,
                                                       headless,
                                                       workflow_cmdline_args,
                                                       project_creation_args,
                                                       graph=graph)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False)
        self.thresholdMaskingApplet = ThresholdMaskingApplet(
            self, "Thresholding", "Thresholding Stage 1")
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(self.ROLE_NAMES)

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(self.EXPORT_NAMES)

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.thresholdMaskingApplet)
        self._applets.append(self.dataExportApplet)
    def __init__(self, headless, workflow_cmdline_args, *args, **kwargs):
        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(LayerViewerWorkflow, self).__init__(headless, graph=graph, *args, **kwargs)
        self._applets = []

        # Create applets 
        self.dataSelectionApplet = DataSelectionApplet(self, 
                                                       "Input Data", 
                                                       "Input Data", 
                                                       supportIlastik05Import=True, 
                                                       batchDataGui=False,
                                                       force5d=True)
        self.viewerApplet = LayerViewerApplet(self)
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( ["Raw Data", "Other Data"] )

        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.viewerApplet )
        self._applets.append( self.dataExportApplet )

        self._workflow_cmdline_args = workflow_cmdline_args
def convert_predictions_to_segmentation( input_paths, parsed_export_args ):
    """
    Read exported pixel predictions and calculate/export the segmentation.
    
    input_paths: Paths to the prediction output files. If hdf5, each path must include the internal dataset name.
    parsed_export_args: The already-parsed cmd-line arguments generated from a DataExportApplet-compatible ArgumentParser.
    """
    graph = Graph()
    opReader = OpInputDataReader(graph=graph)
    opReader.WorkingDirectory.setValue( os.getcwd() )

    opArgmaxChannel = OpArgmaxChannel( graph=graph )
    opArgmaxChannel.Input.connect( opReader.Output )
        
    opExport = OpFormattedDataExport( graph=graph )
    opExport.Input.connect( opArgmaxChannel.Output )

    # Apply command-line arguments.
    DataExportApplet._configure_operator_with_parsed_args(parsed_export_args, opExport)

    last_progress = [-1]
    def print_progress(progress_percent):
        if progress_percent != last_progress[0]:
            last_progress[0] = progress_percent
            sys.stdout.write( " {}".format(progress_percent) )
    opExport.progressSignal.subscribe(print_progress)

    for input_path in input_paths: 
        opReader.FilePath.setValue(input_path)

        input_pathcomp = PathComponents(input_path)
        opExport.OutputFilenameFormat.setValue(str(input_pathcomp.externalPath))

        output_path = opExport.ExportPath.value
        output_pathcomp = PathComponents( output_path )
        output_pathcomp.filenameBase += "_Segmentation"
        opExport.OutputFilenameFormat.setValue(str(output_pathcomp.externalPath))
        
        print "Exporting results to : {}".format( opExport.ExportPath.value )    
        sys.stdout.write("Progress:")
        # Begin export
        opExport.run_export()
        sys.stdout.write("\n")
    print "DONE."
    def make_cmdline_parser(cls, starting_parser=None):
        """
        Returns a command line parser that includes all parameters from the parent applet and adds export_plugin.
        """
        arg_parser = DataExportApplet.make_cmdline_parser(starting_parser)
        arg_parser.add_argument('--export_plugin',
                                help='Plugin name for exporting tracking results',
                                required=False,
                                default=None)
        return arg_parser
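
Usage sketch for the parser factory above, assuming it is a classmethod as its cls argument suggests (the plugin name and the extra flag are illustrative):

    import argparse
    parser = TrackingBaseDataExportApplet.make_cmdline_parser(argparse.ArgumentParser())
    args, unused = parser.parse_known_args(["--export_plugin=CSV-Table", "--not_an_export_flag"])
    print(args.export_plugin)  # CSV-Table
    print(unused)              # ['--not_an_export_flag']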
Example #17
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_workflow, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()

        super(WsdtWorkflow, self).__init__( shell, headless, workflow_cmdline_args, project_creation_workflow, graph=graph, *args, **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data", "Input Data")

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( self.ROLE_NAMES )

        # -- Wsdt applet
        #
        self.wsdtApplet = WsdtApplet(self, "Watershed", "Wsdt Watershed")

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( self.EXPORT_NAMES )

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(self,
                                                           "Batch Processing",
                                                           self.dataSelectionApplet,
                                                           self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, self.ROLE_NAMES )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))
    def make_cmdline_parser(cls, starting_parser=None):
        """
        Returns a command line parser that includes all parameters from the parent applet and adds export_plugin.
        """
        arg_parser = DataExportApplet.make_cmdline_parser(starting_parser)
        arg_parser.add_argument('--export_plugin',
                                help='Plugin name for exporting tracking results',
                                required=False,
                                default=None)
        arg_parser.add_argument('--big_data_viewer_xml_file',
                                help='Path to BigDataViewer XML file. Required if export_plugin=Fiji-MaMuT',
                                required=False,
                                default=None)
        return arg_parser
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self._applets = []

        # Create applets 
        self.dataSelectionApplet = DataSelectionApplet(self, 
                                                       "Input Data", 
                                                       "Input Data", 
                                                       supportIlastik05Import=True, 
                                                       batchDataGui=False,
                                                       force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        role_names = ["Input Data"]
        opDataSelection.DatasetRoles.setValue( role_names )

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( ["Input"] )        

        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.dataExportApplet )

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        self._workflow_cmdline_args = workflow_cmdline_args
        self._data_input_args = None
        self._data_export_args = None
        if workflow_cmdline_args:
            self._data_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( workflow_cmdline_args, role_names )
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( unused_args )
            if unused_args:
                logger.warning("Unused command-line args: {}".format( unused_args ))
def all_dataset_internal_paths(f):
    """
    Return a list of all the internal datasets in an hdf5 file.
    """
    allkeys = []
    f.visit(allkeys.append)
    dataset_keys = [key for key in allkeys
                    if isinstance(f[key], h5py.Dataset)]
    return dataset_keys

if __name__ == "__main__":
    import sys
    import argparse
    
    # Construct a parser with all the 'normal' export options, and add arg for input_path.
    parser = DataExportApplet.make_cmdline_parser( argparse.ArgumentParser() )
    parser.add_argument("input_path", help="Path to your exported predictions.")
    parsed_args = parser.parse_args()    
    
    # As a convenience, auto-determine the internal dataset path if possible.
    path_comp = PathComponents(parsed_args.input_path, os.getcwd())
    if path_comp.extension in PathComponents.HDF5_EXTS and path_comp.internalDatasetName == "":
        
        with h5py.File(path_comp.externalPath, 'r') as f:
            all_internal_paths = all_dataset_internal_paths(f)

        if len(all_internal_paths) == 1:
            path_comp.internalPath = all_internal_paths[0]
            parsed_args.input_path = path_comp.totalPath()
        elif len(all_internal_paths) == 0:
            sys.stderr.write("Could not find any datasets in your input file.")
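
all_dataset_internal_paths() can be sanity-checked with a throwaway file; this sketch needs only h5py and the standard library:

    import os
    import tempfile
    import h5py

    probe_path = os.path.join(tempfile.mkdtemp(), "probe.h5")
    with h5py.File(probe_path, "w") as f:
        f.create_dataset("exported_data", data=[1, 2, 3])
        f.create_group("metadata")  # groups are filtered out

    with h5py.File(probe_path, "r") as f:
        print(all_dataset_internal_paths(f))  # ['exported_data']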
Example #21
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_workflow, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()

        super(WsdtWorkflow, self).__init__(shell,
                                           headless,
                                           workflow_cmdline_args,
                                           project_creation_workflow,
                                           graph=graph,
                                           *args,
                                           **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data",
                                                       "Input Data")

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(self.ROLE_NAMES)

        # -- Wsdt applet
        #
        self.wsdtApplet = WsdtApplet(self, "Watershed", "Wsdt Watershed")

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(self.EXPORT_NAMES)

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(
            self, "Batch Processing", self.dataSelectionApplet,
            self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(
                workflow_cmdline_args)
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args(
                unused_args, self.ROLE_NAMES)
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format(unused_args))
    """
    Return a list of all the internal datasets in an hdf5 file.
    """
    allkeys = []
    f.visit(allkeys.append)
    dataset_keys = [key for key in allkeys
                    if isinstance(f[key], h5py.Dataset)]
    return dataset_keys

if __name__ == "__main__":
    import sys
    import argparse
    #sys.argv += "/tmp/example_slice.h5/data /tmp/example_slice2.h5/data --export_drange=(0,255) --output_format=png --pipeline_result_drange=(1,2)".split()
    
    # Construct a parser with all the 'normal' export options, and add arg for prediction_image_paths.
    parser = DataExportApplet.make_cmdline_parser( argparse.ArgumentParser() )
    parser.add_argument("prediction_image_paths", nargs='+', help="Path(s) to your exported predictions.")
    parsed_args = parser.parse_args()
    parsed_args, unused_args = DataExportApplet.parse_known_cmdline_args( sys.argv[1:], parsed_args )
    
    # As a convenience, auto-determine the internal dataset path if possible.
    for index, input_path in enumerate(parsed_args.prediction_image_paths):
        path_comp = PathComponents(input_path, os.getcwd())        
        if not parsed_args.output_internal_path:
            parsed_args.output_internal_path = "segmentation"
        if path_comp.extension in PathComponents.HDF5_EXTS and path_comp.internalDatasetName == "":            
            with h5py.File(path_comp.externalPath, 'r') as f:
                all_internal_paths = all_dataset_internal_paths(f)
    
            if len(all_internal_paths) == 1:
                path_comp.internalPath = all_internal_paths[0]
                parsed_args.prediction_image_paths[index] = path_comp.totalPath()
def all_dataset_internal_paths(f):
    """
    Return a list of all the internal datasets in an hdf5 file.
    """
    allkeys = []
    f.visit(allkeys.append)
    dataset_keys = [key for key in allkeys if isinstance(f[key], h5py.Dataset)]
    return dataset_keys


if __name__ == "__main__":
    import sys
    import argparse

    # Construct a parser with all the 'normal' export options, and add arg for input_path.
    parser = DataExportApplet.make_cmdline_parser(argparse.ArgumentParser())
    parser.add_argument("input_path",
                        help="Path to your exported predictions.")
    parsed_args = parser.parse_args()

    # As a convenience, auto-determine the internal dataset path if possible.
    path_comp = PathComponents(parsed_args.input_path, os.getcwd())
    if path_comp.extension in PathComponents.HDF5_EXTS and path_comp.internalDatasetName == "":

        with h5py.File(path_comp.externalPath, "r") as f:
            all_internal_paths = all_dataset_internal_paths(f)

        if len(all_internal_paths) == 1:
            path_comp.internalPath = all_internal_paths[0]
            parsed_args.input_path = path_comp.totalPath()
        elif len(all_internal_paths) == 0:
            sys.stderr.write("Could not find any datasets in your input file.")
Example #24
class DataConversionWorkflow(Workflow):
    """
    Simple workflow for converting data between formats.
    Has only two 'interactive' applets (Data Selection and Data Export), plus the BatchProcessing applet.    

    Supports headless mode. For example:
    
    .. code-block:: bash

        python ilastik.py --headless 
                          --new_project=NewTemporaryProject.ilp
                          --workflow=DataConversionWorkflow
                          --output_format="png sequence"
                          ~/input1.h5
                          ~/input2.h5

    .. note:: Beware of issues related to absolute vs. relative paths.
              Relative links are stored relative to the project file.

              To avoid this issue entirely, either 
                 (1) use only absolute filepaths
              or (2) cd into your project file's directory before launching ilastik.
    
    """
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self._applets = []

        # Instantiate DataSelection applet
        self.dataSelectionApplet = DataSelectionApplet(self, 
                                                       "Input Data", 
                                                       "Input Data", 
                                                       supportIlastik05Import=True)

        # Configure global DataSelection settings
        role_names = ["Input Data"]
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( role_names )

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( ["Input"] )        

        # No special data pre/post processing necessary in this workflow, 
        #   but this is where we'd hook it up if we needed it.
        #
        #self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        #self.dataExportApplet.prepare_lane_for_export = self.prepare_lane_for_export
        #self.dataExportApplet.post_process_lane_export = self.post_process_lane_export
        #self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Instantiate BatchProcessing applet
        self.batchProcessingApplet = BatchProcessingApplet(self, 
                                                           "Batch Processing", 
                                                           self.dataSelectionApplet, 
                                                           self.dataExportApplet)

        # Expose our applets in a list (for the shell to use)
        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.dataExportApplet )
        self._applets.append(self.batchProcessingApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, role_names )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))

    @property
    def applets(self):
        """
        Overridden from Workflow base class.
        """
        return self._applets

    @property
    def imageNameListSlot(self):
        """
        Overridden from Workflow base class.
        """
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def prepareForNewLane(self, laneIndex):
        """
        Overridden from Workflow base class.
        Called immediately before connectLane()
        """
        # No preparation necessary.
        pass

    def connectLane(self, laneIndex):
        """
        Overridden from Workflow base class.
        """
        # Get a *view* of each top-level operator, specific to the current lane.
        opDataSelectionView = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opDataExportView = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        # Now connect the operators together for this lane.
        # Most workflows would have more to do here, but this workflow is super simple:
        # We just connect input to export
        opDataExportView.RawDatasetInfo.connect( opDataSelectionView.DatasetGroup[RAW_DATA_ROLE_INDEX] )        
        opDataExportView.Inputs.resize( 1 )
        opDataExportView.Inputs[RAW_DATA_ROLE_INDEX].connect( opDataSelectionView.ImageGroup[RAW_DATA_ROLE_INDEX] )

        # There is no special "raw" display layer in this workflow.
        #opDataExportView.RawData.connect( opDataSelectionView.ImageGroup[0] )

    def handleNewLanesAdded(self):
        """
        Overridden from Workflow base class.
        Called immediately AFTER connectLane() and the dataset is loaded into the workflow.
        """
        # No special handling required.
        pass

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args( self._data_export_args )

        if self._headless and self._batch_input_args and self._data_export_args:
            logger.info("Beginning Batch Processing")
            self.batchProcessingApplet.run_export_from_parsed_args(self._batch_input_args)
            logger.info("Completed Batch Processing")

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.statusUpdateSignal`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0

        opDataExport = self.dataExportApplet.topLevelOperator
        export_data_ready = input_ready and \
                            len(opDataExport.Inputs[0]) > 0 and \
                            opDataExport.Inputs[0][0].ready() and \
                            (TinyVector(opDataExport.Inputs[0][0].meta.shape) > 0).all()

        self._shell.setAppletEnabled(self.dataSelectionApplet, not self.batchProcessingApplet.busy)
        self._shell.setAppletEnabled(self.dataExportApplet, export_data_ready and not self.batchProcessingApplet.busy)
        self._shell.setAppletEnabled(self.batchProcessingApplet, export_data_ready)
        
        # Lastly, check for certain "busy" conditions, during which we 
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        busy |= self.batchProcessingApplet.busy
        self._shell.enableProjectChanges( not busy )
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_workflow, *args, **kwargs):
        self.stored_classifier = None

        # Create a graph to be shared by all operators
        graph = Graph()

        super(EdgeTrainingWithMulticutWorkflow, self).__init__( shell, headless, workflow_cmdline_args, project_creation_workflow, graph=graph, *args, **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data", "Input Data", forceAxisOrder=['zyxc', 'yxc'])

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( self.ROLE_NAMES )

        # -- Watershed applet
        #
        self.wsdtApplet = WsdtApplet(self, "DT Watershed", "DT Watershed")

        # -- Edge training AND Multicut applet
        # 
        self.edgeTrainingWithMulticutApplet = EdgeTrainingWithMulticutApplet(self, "Training and Multicut", "Training and Multicut")
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        DEFAULT_FEATURES = { self.ROLE_NAMES[self.DATA_ROLE_RAW]: ['standard_edge_mean'] }
        opEdgeTrainingWithMulticut.FeatureNames.setValue( DEFAULT_FEATURES )

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")
        self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( self.EXPORT_NAMES )

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(self,
                                                           "Batch Processing",
                                                           self.dataSelectionApplet,
                                                           self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.edgeTrainingWithMulticutApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--retrain', help="Re-train the classifier based on labels stored in the project file, and re-save.", action="store_true")
        self.parsed_workflow_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        if unused_args:
            # Parse batch export/input args.
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( unused_args )
            self._batch_input_args, unused_args = self.batchProcessingApplet.parse_known_cmdline_args( unused_args )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))
        
        if not self._headless:
            shell.currentAppletChanged.connect( self.handle_applet_changed )
class EdgeTrainingWithMulticutWorkflow(Workflow):
    workflowName = "Edge Training With Multicut"
    workflowDisplayName = "(BETA) Edge Training With Multicut"

    workflowDescription = "A workflow based around training a classifier for merging superpixels and joining them via multicut."
    defaultAppletIndex = 0 # show DataSelection by default

    DATA_ROLE_RAW = 0
    DATA_ROLE_PROBABILITIES = 1
    DATA_ROLE_SUPERPIXELS = 2
    DATA_ROLE_GROUNDTRUTH = 3
    ROLE_NAMES = ['Raw Data', 'Probabilities', 'Superpixels', 'Groundtruth']
    EXPORT_NAMES = ['Multicut Segmentation']

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_workflow, *args, **kwargs):
        self.stored_classifier = None

        # Create a graph to be shared by all operators
        graph = Graph()

        super(EdgeTrainingWithMulticutWorkflow, self).__init__( shell, headless, workflow_cmdline_args, project_creation_workflow, graph=graph, *args, **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data", "Input Data", forceAxisOrder=['zyxc', 'yxc'])

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( self.ROLE_NAMES )

        # -- Watershed applet
        #
        self.wsdtApplet = WsdtApplet(self, "DT Watershed", "DT Watershed")

        # -- Edge training AND Multicut applet
        # 
        self.edgeTrainingWithMulticutApplet = EdgeTrainingWithMulticutApplet(self, "Training and Multicut", "Training and Multicut")
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        DEFAULT_FEATURES = { self.ROLE_NAMES[self.DATA_ROLE_RAW]: ['standard_edge_mean'] }
        opEdgeTrainingWithMulticut.FeatureNames.setValue( DEFAULT_FEATURES )

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")
        self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( self.EXPORT_NAMES )

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(self,
                                                           "Batch Processing",
                                                           self.dataSelectionApplet,
                                                           self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.edgeTrainingWithMulticutApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument('--retrain', help="Re-train the classifier based on labels stored in the project file, and re-save.", action="store_true")
        self.parsed_workflow_args, unused_args = parser.parse_known_args(workflow_cmdline_args)
        if unused_args:
            # Parse batch export/input args.
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( unused_args )
            self._batch_input_args, unused_args = self.batchProcessingApplet.parse_known_cmdline_args( unused_args )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))
        
        if not self._headless:
            shell.currentAppletChanged.connect( self.handle_applet_changed )

    def prepareForNewLane(self, laneIndex):
        """
        Overridden from Workflow base class.
        Called immediately before a new lane is added to the workflow.
        """
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        opClassifierCache = opEdgeTrainingWithMulticut.opEdgeTraining.opClassifierCache

        # When the new lane is added, dirty notifications will propagate throughout the entire graph.
        # This means the classifier will be marked 'dirty' even though it is still usable.
        # Before that happens, let's store the classifier, so we can restore it in handleNewLanesAdded(), below.
        if opClassifierCache.Output.ready() and \
           not opClassifierCache._dirty:
            self.stored_classifier = opClassifierCache.Output.value
        else:
            self.stored_classifier = None
        
    def handleNewLanesAdded(self):
        """
        Overridden from Workflow base class.
        Called immediately after a new lane is added to the workflow and initialized.
        """
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        opClassifierCache = opEdgeTrainingWithMulticut.opEdgeTraining.opClassifierCache

        # Restore classifier we saved in prepareForNewLane() (if any)
        if self.stored_classifier:
            opClassifierCache.forceValue(self.stored_classifier)
            # Release reference
            self.stored_classifier = None

    def connectLane(self, laneIndex):
        """
        Override from base class.
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opWsdt = self.wsdtApplet.topLevelOperator.getLane(laneIndex)
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        # RAW DATA: Convert to float32
        opConvertRaw = OpConvertDtype( parent=self )
        opConvertRaw.ConversionDtype.setValue( np.float32 )
        opConvertRaw.Input.connect( opDataSelection.ImageGroup[self.DATA_ROLE_RAW] )

        # PROBABILITIES: Convert to float32
        opConvertProbabilities = OpConvertDtype( parent=self )
        opConvertProbabilities.ConversionDtype.setValue( np.float32 )
        opConvertProbabilities.Input.connect( opDataSelection.ImageGroup[self.DATA_ROLE_PROBABILITIES] )

        # PROBABILITIES: Normalize drange to [0.0, 1.0]
        opNormalizeProbabilities = OpPixelOperator( parent=self )
        def normalize_inplace(a):
            drange = opNormalizeProbabilities.Input.meta.drange
            if drange is None or (drange[0] == 0.0 and drange[1] == 1.0):
                return a
            a[:] -= drange[0]
            a[:] /= ( drange[1] - drange[0] )
            return a
        opNormalizeProbabilities.Input.connect( opConvertProbabilities.Output )
        opNormalizeProbabilities.Function.setValue( normalize_inplace )
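        # Worked illustration (not from the original source): if the upstream
        # metadata reports drange == (0, 255), normalize_inplace maps a pixel
        # value of 255 to (255 - 0) / (255 - 0) == 1.0 and 0 to 0.0, so the
        # effective output drange is [0.0, 1.0].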

        # GROUNDTRUTH: Convert to uint32, relabel, and cache
        opConvertGroundtruth = OpConvertDtype( parent=self )
        opConvertGroundtruth.ConversionDtype.setValue( np.uint32 )
        opConvertGroundtruth.Input.connect( opDataSelection.ImageGroup[self.DATA_ROLE_GROUNDTRUTH] )

        opRelabelGroundtruth = OpRelabelConsecutive( parent=self )
        opRelabelGroundtruth.Input.connect( opConvertGroundtruth.Output )
        
        opGroundtruthCache = OpBlockedArrayCache( parent=self )
        opGroundtruthCache.CompressionEnabled.setValue(True)
        opGroundtruthCache.Input.connect( opRelabelGroundtruth.Output )

        # watershed inputs
        opWsdt.RawData.connect( opDataSelection.ImageGroup[self.DATA_ROLE_RAW] )
        opWsdt.Input.connect( opNormalizeProbabilities.Output )

        # Actual computation is done with both RawData and Probabilities
        opStackRawAndVoxels = OpSimpleStacker( parent=self )
        opStackRawAndVoxels.Images.resize(2)
        opStackRawAndVoxels.Images[0].connect( opConvertRaw.Output )
        opStackRawAndVoxels.Images[1].connect( opNormalizeProbabilities.Output )
        opStackRawAndVoxels.AxisFlag.setValue('c')
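        # The stack is concatenated along the channel axis ('c'), so the edge
        # feature computation downstream sees raw intensities and probabilities
        # together as one multi-channel volume.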

        # If superpixels are available from a file, use it.
        opSuperpixelsSelect = OpPrecomputedInput( ignore_dirty_input=True, parent=self )
        opSuperpixelsSelect.PrecomputedInput.connect( opDataSelection.ImageGroup[self.DATA_ROLE_SUPERPIXELS] )
        opSuperpixelsSelect.SlowInput.connect( opWsdt.Superpixels )
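        # OpPrecomputedInput presumably forwards PrecomputedInput whenever that
        # slot is ready (i.e. a superpixel file was provided) and otherwise
        # falls back to SlowInput, the watershed-computed superpixels.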

        # If the superpixel file changes, then we have to remove the training labels from the image
        opEdgeTraining = opEdgeTrainingWithMulticut.opEdgeTraining
        def handle_new_superpixels( *args ):
            opEdgeTraining.handle_dirty_superpixels( opEdgeTraining.Superpixels )
        opDataSelection.ImageGroup[self.DATA_ROLE_SUPERPIXELS].notifyReady( handle_new_superpixels )
        opDataSelection.ImageGroup[self.DATA_ROLE_SUPERPIXELS].notifyUnready( handle_new_superpixels )
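        # Both ready and unready transitions are subscribed, so stale training
        # labels are discarded whenever the superpixel source appears,
        # disappears, or is replaced.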

        # edge training inputs
        opEdgeTrainingWithMulticut.RawData.connect( opDataSelection.ImageGroup[self.DATA_ROLE_RAW] ) # Used for visualization only
        opEdgeTrainingWithMulticut.VoxelData.connect( opStackRawAndVoxels.Output )
        opEdgeTrainingWithMulticut.Superpixels.connect( opSuperpixelsSelect.Output )
        opEdgeTrainingWithMulticut.GroundtruthSegmentation.connect( opGroundtruthCache.Output )

        # DataExport inputs
        opDataExport.RawData.connect( opDataSelection.ImageGroup[self.DATA_ROLE_RAW] )
        opDataExport.RawDatasetInfo.connect( opDataSelection.DatasetGroup[self.DATA_ROLE_RAW] )        
        opDataExport.Inputs.resize( len(self.EXPORT_NAMES) )
        opDataExport.Inputs[0].connect( opEdgeTrainingWithMulticut.Output )
        for slot in opDataExport.Inputs:
            assert slot.partner is not None
        
    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args( self._data_export_args )

        # Retrain the classifier?
        if self.parsed_workflow_args.retrain:
            self._force_retrain_classifier(projectManager)

        if self._headless and self._batch_input_args and self._data_export_args:
            # Make sure the watershed can be computed if necessary.
            opWsdt = self.wsdtApplet.topLevelOperator
            opWsdt.FreezeCache.setValue( False )

            # Error checks
            if (self._batch_input_args.raw_data
            and len(self._batch_input_args.probabilities) != len(self._batch_input_args.raw_data) ):
                msg = "Error: Your input file lists are malformed.\n"
                msg += "Usage: run_ilastik.sh --headless --raw_data <file1> <file2>... --probabilities <file1> <file2>..."
                sys.exit(msg)

            if (self._batch_input_args.superpixels
            and (not self._batch_input_args.raw_data or len(self._batch_input_args.superpixels) != len(self._batch_input_args.raw_data) ) ):
                msg = "Error: Wrong number of superpixel file inputs."
                sys.exit(msg)

            logger.info("Beginning Batch Processing")
            self.batchProcessingApplet.run_export_from_parsed_args(self._batch_input_args)
            logger.info("Completed Batch Processing")

    def _force_retrain_classifier(self, projectManager):
        logger.info("Retraining edge classifier...")
        op = self.edgeTrainingWithMulticutApplet.topLevelOperator

        # Cause the classifier to be dirty so it is forced to retrain.
        # (useful if the stored labels or features were changed outside ilastik)
        op.FeatureNames.setDirty()
        
        # Request the classifier, which forces training
        new_classifier = op.opEdgeTraining.opClassifierCache.Output.value
        if new_classifier is None:
            raise RuntimeError("Classifier could not be trained! Check your labels and features.")

        # store new classifier to project file
        projectManager.saveProject(force_all_save=False)

    def prepare_for_entire_export(self):
        """
        Assigned to DataExportApplet.prepare_for_entire_export
        (See above.)
        """
        # While exporting results, the segmentation cache should not be "frozen"
        self.freeze_status = self.edgeTrainingWithMulticutApplet.topLevelOperator.FreezeCache.value
        self.edgeTrainingWithMulticutApplet.topLevelOperator.FreezeCache.setValue(False)

    def post_process_entire_export(self):
        """
        Assigned to DataExportApplet.post_process_entire_export
        (See above.)
        """
        # After export is finished, re-freeze the segmentation cache.
        self.edgeTrainingWithMulticutApplet.topLevelOperator.FreezeCache.setValue(self.freeze_status)


    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opWsdt = self.wsdtApplet.topLevelOperator
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        opDataExport = self.dataExportApplet.topLevelOperator

        # If no data, nothing else is ready.
        input_ready = len(opDataSelection.ImageGroup) > 0 and not self.dataSelectionApplet.busy

        superpixels_available_from_file = False
        lane_index = self._shell.currentImageIndex
        if lane_index != -1:
            superpixels_available_from_file = opDataSelection.ImageGroup[lane_index][self.DATA_ROLE_SUPERPIXELS].ready()

        superpixels_ready = opWsdt.Superpixels.ready()

        # The user isn't allowed to touch anything while batch processing is running.
        batch_processing_busy = self.batchProcessingApplet.busy

        self._shell.setAppletEnabled( self.dataSelectionApplet,             not batch_processing_busy )
        self._shell.setAppletEnabled( self.wsdtApplet,                      not batch_processing_busy and input_ready and not superpixels_available_from_file )
        self._shell.setAppletEnabled( self.edgeTrainingWithMulticutApplet,  not batch_processing_busy and input_ready and superpixels_ready )
        self._shell.setAppletEnabled( self.dataExportApplet,                not batch_processing_busy and input_ready and opEdgeTrainingWithMulticut.Output.ready())
        self._shell.setAppletEnabled( self.batchProcessingApplet,           not batch_processing_busy and input_ready )

        # Lastly, check for certain "busy" conditions, during which we
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.wsdtApplet.busy
        busy |= self.edgeTrainingWithMulticutApplet.busy
        busy |= self.dataExportApplet.busy
        busy |= self.batchProcessingApplet.busy
        self._shell.enableProjectChanges( not busy )

    def handle_applet_changed(self, prev_index, current_index):
        if prev_index != current_index:
            # If the user is viewing an applet downstream of the WSDT applet,
            # make sure the superpixels are always up-to-date.
            opWsdt = self.wsdtApplet.topLevelOperator
            opWsdt.FreezeCache.setValue( self._shell.currentAppletIndex <= self.applets.index( self.wsdtApplet ) )
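            # That is: keep the watershed cache frozen while the user is at or
            # upstream of the WSDT applet, and let it recompute live once they
            # move downstream.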

            # Same for the multicut segmentation
            opMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
            opMulticut.FreezeCache.setValue( self._shell.currentAppletIndex <= self.applets.index( self.edgeTrainingWithMulticutApplet ) )
            
Example #27
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_workflow, *args, **kwargs):
        self.stored_classifier = None

        # Create a graph to be shared by all operators
        graph = Graph()

        super(MulticutWorkflow, self).__init__(shell,
                                               headless,
                                               workflow_cmdline_args,
                                               project_creation_workflow,
                                               graph=graph,
                                               *args,
                                               **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(
            self, "Input Data", "Input Data", forceAxisOrder=['zyxc', 'yxc'])

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(self.ROLE_NAMES)

        # -- Watershed applet
        #
        self.wsdtApplet = WsdtApplet(self, "DT Watershed", "DT Watershed")

        # -- Edge training applet
        #
        self.edgeTrainingApplet = EdgeTrainingApplet(self, "Edge Training",
                                                     "Edge Training")
        opEdgeTraining = self.edgeTrainingApplet.topLevelOperator
        DEFAULT_FEATURES = {
            self.ROLE_NAMES[self.DATA_ROLE_RAW]: ['standard_edge_mean']
        }
        opEdgeTraining.FeatureNames.setValue(DEFAULT_FEATURES)

        # -- Multicut applet
        #
        self.multicutApplet = MulticutApplet(self, "Multicut Segmentation",
                                             "Multicut Segmentation")

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")
        self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(self.EXPORT_NAMES)

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(
            self, "Batch Processing", self.dataSelectionApplet,
            self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.edgeTrainingApplet)
        self._applets.append(self.multicutApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(
                workflow_cmdline_args)
            self._batch_input_args, unused_args = self.batchProcessingApplet.parse_known_cmdline_args(
                unused_args)
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format(unused_args))

        if not self._headless:
            shell.currentAppletChanged.connect(self.handle_applet_changed)
class DataConversionWorkflow(Workflow):
    """
    Simple workflow for converting data between formats.  Has only two applets: Data Selection and Data Export.
    
    Also supports a command-line interface for headless mode.
    
    For example:
    
    .. code-block:: bash

        python ilastik.py --headless --new_project=NewTemporaryProject.ilp --workflow=DataConversionWorkflow --output_format="png sequence" ~/input1.h5 ~/input2.h5
    
    Or if you have an existing project with input files already selected and configured:

    .. code-block:: bash

        python ilastik.py --headless --project=MyProject.ilp --output_format=jpeg
    
    .. note:: Beware of issues related to absolute vs. relative paths.  Relative links are stored relative to the project file.
              To avoid this issue entirely, either 
                 (1) use only absolute filepaths
              or (2) cd into your project file's directory before launching ilastik.
    
    """
    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        
        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs)
        self._applets = []

        # Create applets 
        self.dataSelectionApplet = DataSelectionApplet(self, 
                                                       "Input Data", 
                                                       "Input Data", 
                                                       supportIlastik05Import=True, 
                                                       batchDataGui=False,
                                                       force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        role_names = ["Input Data"]
        opDataSelection.DatasetRoles.setValue( role_names )

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( ["Input"] )        

        self._applets.append( self.dataSelectionApplet )
        self._applets.append( self.dataExportApplet )

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        self._workflow_cmdline_args = workflow_cmdline_args
        self._data_input_args = None
        self._data_export_args = None
        if workflow_cmdline_args:
            self._data_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( workflow_cmdline_args, role_names )
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( unused_args )
            if unused_args:
                logger.warning("Unused command-line args: {}".format( unused_args ))

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the batch data selection operator.
        if self._data_input_args and self._data_input_args.input_files:
            self.dataSelectionApplet.configure_operator_with_parsed_args( self._data_input_args )
        
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args( self._data_export_args )

        if self._headless and self._data_input_args and self._data_export_args:
            # Now run the export and report progress....
            opDataExport = self.dataExportApplet.topLevelOperator
            for i, opExportDataLaneView in enumerate(opDataExport):
                logger.info( "Exporting file #{} to {}".format(i, opExportDataLaneView.ExportPath.value) )
    
                sys.stdout.write( "Result #{}/{} Progress: ".format( i, len( opDataExport ) ) )
                def print_progress( progress ):
                    sys.stdout.write( "{} ".format( progress ) )
    
                # If the operator provides a progress signal, use it.
                slotProgressSignal = opExportDataLaneView.progressSignal
                slotProgressSignal.subscribe( print_progress )
                opExportDataLaneView.run_export()
                
                # Finished.
                sys.stdout.write("\n")

    def connectLane(self, laneIndex):
        opDataSelectionView = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opDataExportView = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        opDataExportView.RawDatasetInfo.connect( opDataSelectionView.DatasetGroup[0] )        
        opDataExportView.Inputs.resize( 1 )
        opDataExportView.Inputs[0].connect( opDataSelectionView.ImageGroup[0] )

        # There is no special "raw" display layer in this workflow.
        #opDataExportView.RawData.connect( opDataSelectionView.ImageGroup[0] )

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.statusUpdateSignal`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0

        opDataExport = self.dataExportApplet.topLevelOperator
        export_data_ready = input_ready and \
                            len(opDataExport.Inputs[0]) > 0 and \
                            opDataExport.Inputs[0][0].ready() and \
                            (TinyVector(opDataExport.Inputs[0][0].meta.shape) > 0).all()

        self._shell.setAppletEnabled(self.dataExportApplet, export_data_ready)
        
        # Lastly, check for certain "busy" conditions, during which we 
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        self._shell.enableProjectChanges( not busy )
Example #29
class EdgeTrainingWithMulticutWorkflow(Workflow):
    workflowName = "Edge Training With Multicut"
    workflowDisplayName = "(BETA) Edge Training With Multicut"

    workflowDescription = "A workflow based around training a classifier for merging superpixels and joining them via multicut."
    defaultAppletIndex = 0  # show DataSelection by default

    DATA_ROLE_RAW = 0
    DATA_ROLE_PROBABILITIES = 1
    DATA_ROLE_SUPERPIXELS = 2
    DATA_ROLE_GROUNDTRUTH = 3
    ROLE_NAMES = ['Raw Data', 'Probabilities', 'Superpixels', 'Groundtruth']
    EXPORT_NAMES = ['Multicut Segmentation']

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_workflow, *args, **kwargs):
        self.stored_classifier = None

        # Create a graph to be shared by all operators
        graph = Graph()

        super(EdgeTrainingWithMulticutWorkflow,
              self).__init__(shell,
                             headless,
                             workflow_cmdline_args,
                             project_creation_workflow,
                             graph=graph,
                             *args,
                             **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data",
                                                       "Input Data")

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(self.ROLE_NAMES)

        # -- Watershed applet
        #
        self.wsdtApplet = WsdtApplet(self, "DT Watershed", "DT Watershed")

        # -- Edge training AND Multicut applet
        #
        self.edgeTrainingWithMulticutApplet = EdgeTrainingWithMulticutApplet(
            self, "Training and Multicut", "Training and Multicut")
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        DEFAULT_FEATURES = {
            self.ROLE_NAMES[self.DATA_ROLE_RAW]: ['standard_edge_mean']
        }
        opEdgeTrainingWithMulticut.FeatureNames.setValue(DEFAULT_FEATURES)

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")
        self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(self.EXPORT_NAMES)

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(
            self, "Batch Processing", self.dataSelectionApplet,
            self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.edgeTrainingWithMulticutApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        # Parse workflow-specific command-line args
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--retrain',
            help=
            "Re-train the classifier based on labels stored in the project file, and re-save.",
            action="store_true")
        self.parsed_workflow_args, unused_args = parser.parse_known_args(
            workflow_cmdline_args)
        if unused_args:
            # Parse batch export/input args.
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(
                unused_args)
            self._batch_input_args, unused_args = self.batchProcessingApplet.parse_known_cmdline_args(
                unused_args)
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warn("Unused command-line args: {}".format(unused_args))

        if not self._headless:
            shell.currentAppletChanged.connect(self.handle_applet_changed)

    def prepareForNewLane(self, laneIndex):
        """
        Overridden from Workflow base class.
        Called immediately before a new lane is added to the workflow.
        """
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        opClassifierCache = opEdgeTrainingWithMulticut.opEdgeTraining.opClassifierCache

        # When the new lane is added, dirty notifications will propagate throughout the entire graph.
        # This means the classifier will be marked 'dirty' even though it is still usable.
        # Before that happens, let's store the classifier, so we can restore it in handleNewLanesAdded(), below.
        if opClassifierCache.Output.ready() and \
           not opClassifierCache._dirty:
            self.stored_classifier = opClassifierCache.Output.value
        else:
            self.stored_classifier = None

    def handleNewLanesAdded(self):
        """
        Overridden from Workflow base class.
        Called immediately after a new lane is added to the workflow and initialized.
        """
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        opClassifierCache = opEdgeTrainingWithMulticut.opEdgeTraining.opClassifierCache

        # Restore classifier we saved in prepareForNewLane() (if any)
        if self.stored_classifier:
            opClassifierCache.forceValue(self.stored_classifier)
            # Release reference
            self.stored_classifier = None

    def connectLane(self, laneIndex):
        """
        Override from base class.
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator.getLane(
            laneIndex)
        opWsdt = self.wsdtApplet.topLevelOperator.getLane(laneIndex)
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator.getLane(
            laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(
            laneIndex)

        # RAW DATA: Convert to float32
        opConvertRaw = OpConvertDtype(parent=self)
        opConvertRaw.ConversionDtype.setValue(np.float32)
        opConvertRaw.Input.connect(
            opDataSelection.ImageGroup[self.DATA_ROLE_RAW])

        # PROBABILITIES: Convert to float32
        opConvertProbabilities = OpConvertDtype(parent=self)
        opConvertProbabilities.ConversionDtype.setValue(np.float32)
        opConvertProbabilities.Input.connect(
            opDataSelection.ImageGroup[self.DATA_ROLE_PROBABILITIES])

        # GROUNDTRUTH: Convert to uint32, relabel, and cache
        opConvertGroundtruth = OpConvertDtype(parent=self)
        opConvertGroundtruth.ConversionDtype.setValue(np.uint32)
        opConvertGroundtruth.Input.connect(
            opDataSelection.ImageGroup[self.DATA_ROLE_GROUNDTRUTH])

        opRelabelGroundtruth = OpRelabelConsecutive(parent=self)
        opRelabelGroundtruth.Input.connect(opConvertGroundtruth.Output)

        opGroundtruthCache = OpBlockedArrayCache(parent=self)
        opGroundtruthCache.CompressionEnabled.setValue(True)
        opGroundtruthCache.Input.connect(opRelabelGroundtruth.Output)

        # watershed inputs
        opWsdt.RawData.connect(opDataSelection.ImageGroup[self.DATA_ROLE_RAW])
        opWsdt.Input.connect(
            opDataSelection.ImageGroup[self.DATA_ROLE_PROBABILITIES])

        # Actual computation is done with both RawData and Probabilities
        opStackRawAndVoxels = OpSimpleStacker(parent=self)
        opStackRawAndVoxels.Images.resize(2)
        opStackRawAndVoxels.Images[0].connect(opConvertRaw.Output)
        opStackRawAndVoxels.Images[1].connect(opConvertProbabilities.Output)
        opStackRawAndVoxels.AxisFlag.setValue('c')

        # If superpixels are available from a file, use it.
        opSuperpixelsSelect = OpPrecomputedInput(ignore_dirty_input=True,
                                                 parent=self)
        opSuperpixelsSelect.PrecomputedInput.connect(
            opDataSelection.ImageGroup[self.DATA_ROLE_SUPERPIXELS])
        opSuperpixelsSelect.SlowInput.connect(opWsdt.Superpixels)

        # If the superpixel file changes, then we have to remove the training labels from the image
        opEdgeTraining = opEdgeTrainingWithMulticut.opEdgeTraining

        def handle_new_superpixels(*args):
            opEdgeTraining.handle_dirty_superpixels(opEdgeTraining.Superpixels)

        opDataSelection.ImageGroup[self.DATA_ROLE_SUPERPIXELS].notifyReady(
            handle_new_superpixels)
        opDataSelection.ImageGroup[self.DATA_ROLE_SUPERPIXELS].notifyUnready(
            handle_new_superpixels)

        # edge training inputs
        opEdgeTrainingWithMulticut.RawData.connect(opDataSelection.ImageGroup[
            self.DATA_ROLE_RAW])  # Used for visualization only
        opEdgeTrainingWithMulticut.VoxelData.connect(
            opStackRawAndVoxels.Output)
        opEdgeTrainingWithMulticut.Superpixels.connect(
            opSuperpixelsSelect.Output)
        opEdgeTrainingWithMulticut.GroundtruthSegmentation.connect(
            opGroundtruthCache.Output)

        # DataExport inputs
        opDataExport.RawData.connect(
            opDataSelection.ImageGroup[self.DATA_ROLE_RAW])
        opDataExport.RawDatasetInfo.connect(
            opDataSelection.DatasetGroup[self.DATA_ROLE_RAW])
        opDataExport.Inputs.resize(len(self.EXPORT_NAMES))
        opDataExport.Inputs[0].connect(opEdgeTrainingWithMulticut.Output)
        for slot in opDataExport.Inputs:
            assert slot.partner is not None

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args(
                self._data_export_args)

        # Retrain the classifier?
        if self.parsed_workflow_args.retrain:
            self._force_retrain_classifier(projectManager)

        if self._headless and self._batch_input_args and self._data_export_args:
            # Make sure the watershed can be computed if necessary.
            opWsdt = self.wsdtApplet.topLevelOperator
            opWsdt.FreezeCache.setValue(False)

            # Error checks
            if (self._batch_input_args.raw_data
                    and len(self._batch_input_args.probabilities) != len(
                        self._batch_input_args.raw_data)):
                msg = "Error: Your input file lists are malformed.\n"
                msg += "Usage: run_ilastik.sh --headless --raw_data <file1> <file2>... --probabilities <file1> <file2>..."
                sys.exit(msg)

            if (self._batch_input_args.superpixels
                    and (not self._batch_input_args.raw_data
                         or len(self._batch_input_args.superpixels) != len(
                             self._batch_input_args.raw_data))):
                msg = "Error: Wrong number of superpixel file inputs."
                sys.exit(msg)

            logger.info("Beginning Batch Processing")
            self.batchProcessingApplet.run_export_from_parsed_args(
                self._batch_input_args)
            logger.info("Completed Batch Processing")

    def _force_retrain_classifier(self, projectManager):
        logger.info("Retraining edge classifier...")
        op = self.edgeTrainingWithMulticutApplet.topLevelOperator

        # Cause the classifier to be dirty so it is forced to retrain.
        # (useful if the stored labels or features were changed outside ilastik)
        op.FeatureNames.setDirty()

        # Request the classifier, which forces training
        new_classifier = op.opEdgeTraining.opClassifierCache.Output.value
        if new_classifier is None:
            raise RuntimeError(
                "Classifier could not be trained! Check your labels and features."
            )

        # store new classifier to project file
        projectManager.saveProject(force_all_save=False)

    def prepare_for_entire_export(self):
        """
        Assigned to DataExportApplet.prepare_for_entire_export
        (See above.)
        """
        # While exporting results, the segmentation cache should not be "frozen"
        self.freeze_status = self.edgeTrainingWithMulticutApplet.topLevelOperator.FreezeCache.value
        self.edgeTrainingWithMulticutApplet.topLevelOperator.FreezeCache.setValue(
            False)

    def post_process_entire_export(self):
        """
        Assigned to DataExportApplet.post_process_entire_export
        (See above.)
        """
        # After export is finished, re-freeze the segmentation cache.
        self.edgeTrainingWithMulticutApplet.topLevelOperator.FreezeCache.setValue(
            self.freeze_status)

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opWsdt = self.wsdtApplet.topLevelOperator
        opEdgeTrainingWithMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
        opDataExport = self.dataExportApplet.topLevelOperator

        # If no data, nothing else is ready.
        input_ready = len(opDataSelection.ImageGroup
                          ) > 0 and not self.dataSelectionApplet.busy

        superpixels_available_from_file = False
        lane_index = self._shell.currentImageIndex
        if lane_index != -1:
            superpixels_available_from_file = opDataSelection.ImageGroup[
                lane_index][self.DATA_ROLE_SUPERPIXELS].ready()

        superpixels_ready = opWsdt.Superpixels.ready()

        # The user isn't allowed to touch anything while batch processing is running.
        batch_processing_busy = self.batchProcessingApplet.busy

        self._shell.setAppletEnabled(self.dataSelectionApplet,
                                     not batch_processing_busy)
        self._shell.setAppletEnabled(
            self.wsdtApplet, not batch_processing_busy and input_ready
            and not superpixels_available_from_file)
        self._shell.setAppletEnabled(
            self.edgeTrainingWithMulticutApplet, not batch_processing_busy
            and input_ready and superpixels_ready)
        self._shell.setAppletEnabled(
            self.dataExportApplet, not batch_processing_busy and input_ready
            and opEdgeTrainingWithMulticut.Output.ready())
        self._shell.setAppletEnabled(self.batchProcessingApplet,
                                     not batch_processing_busy and input_ready)

        # Lastly, check for certain "busy" conditions, during which we
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.wsdtApplet.busy
        busy |= self.edgeTrainingWithMulticutApplet.busy
        busy |= self.dataExportApplet.busy
        busy |= self.batchProcessingApplet.busy
        self._shell.enableProjectChanges(not busy)

    def handle_applet_changed(self, prev_index, current_index):
        if prev_index != current_index:
            # If the user is viewing an applet downstream of the WSDT applet,
            # make sure the superpixels are always up-to-date.
            opWsdt = self.wsdtApplet.topLevelOperator
            opWsdt.FreezeCache.setValue(self._shell.currentAppletIndex <=
                                        self.applets.index(self.wsdtApplet))

            # Same for the multicut segmentation
            opMulticut = self.edgeTrainingWithMulticutApplet.topLevelOperator
            opMulticut.FreezeCache.setValue(
                self._shell.currentAppletIndex <= self.applets.index(
                    self.edgeTrainingWithMulticutApplet))


def all_dataset_internal_paths(f):
    """
    Return a list of all the internal datasets in an hdf5 file.
    """
    allkeys = []
    f.visit(allkeys.append)
    dataset_keys = [key for key in allkeys if isinstance(f[key], h5py.Dataset)]
    return dataset_keys
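
# Usage sketch (hypothetical file name, for illustration only):
#
#     with h5py.File("predictions.h5", "r") as f:
#         internal_paths = all_dataset_internal_paths(f)
#         # -> e.g. ['exported_data', 'some_group/volume']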


if __name__ == "__main__":
    import sys
    import argparse

    # sys.argv += "/tmp/example_slice.h5/data /tmp/example_slice2.h5/data --export_drange=(0,255) --output_format=png --pipeline_result_drange=(1,2)".split()

    # Construct a parser with all the 'normal' export options, and add arg for prediction_image_paths.
    parser = DataExportApplet.make_cmdline_parser(argparse.ArgumentParser())
    parser.add_argument("prediction_image_paths",
                        nargs="+",
                        help="Path(s) to your exported predictions.")
    parsed_args = parser.parse_args()
    parsed_args, unused_args = DataExportApplet.parse_known_cmdline_args(
        sys.argv[1:], parsed_args)
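    # parse_args() consumes the positional prediction_image_paths; the already-
    # parsed namespace is then passed back through parse_known_cmdline_args()
    # so the export-specific options are validated on the same namespace.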

    # As a convenience, auto-determine the internal dataset path if possible.
    for index, input_path in enumerate(parsed_args.prediction_image_paths):
        path_comp = PathComponents(input_path, os.getcwd())
        if not parsed_args.output_internal_path:
            parsed_args.output_internal_path = "segmentation"
        if path_comp.extension in PathComponents.HDF5_EXTS and path_comp.internalDatasetName == "":
            with h5py.File(path_comp.externalPath, "r") as f:
                all_internal_paths = all_dataset_internal_paths(f)
Example #32
class DataConversionWorkflow(Workflow):
    """
    Simple workflow for converting data between formats.
    Has only two 'interactive' applets (Data Selection and Data Export), plus the BatchProcessing applet.

    Supports headless mode. For example:

    .. code-block:: bash

        python ilastik.py --headless
                          --new_project=NewTemporaryProject.ilp
                          --workflow=DataConversionWorkflow
                          --output_format="png sequence"
                          ~/input1.h5
                          ~/input2.h5

    .. note:: Beware of issues related to absolute vs. relative paths.
              Relative links are stored relative to the project file.

              To avoid this issue entirely, either
                 (1) use only absolute filepaths
              or (2) cd into your project file's directory before launching ilastik.

    """

    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(
            shell, headless, workflow_cmdline_args, project_creation_args, graph=graph, *args, **kwargs
        )
        self._applets = []

        # Instantiate DataSelection applet
        self.dataSelectionApplet = DataSelectionApplet(
            self, "Input Data", "Input Data", supportIlastik05Import=True, forceAxisOrder=None
        )

        # Configure global DataSelection settings
        role_names = ["Input Data"]
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(role_names)

        # Instantiate DataExport applet
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(["Input"])

        # No special data pre/post processing necessary in this workflow,
        #   but this is where we'd hook it up if we needed it.
        #
        # self.dataExportApplet.prepare_for_entire_export = self.prepare_for_entire_export
        # self.dataExportApplet.prepare_lane_for_export = self.prepare_lane_for_export
        # self.dataExportApplet.post_process_lane_export = self.post_process_lane_export
        # self.dataExportApplet.post_process_entire_export = self.post_process_entire_export

        # Instantiate BatchProcessing applet
        self.batchProcessingApplet = BatchProcessingApplet(
            self, "Batch Processing", self.dataSelectionApplet, self.dataExportApplet
        )

        # Expose our applets in a list (for the shell to use)
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(workflow_cmdline_args)
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args(
                unused_args, role_names
            )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format(unused_args))

    @property
    def applets(self):
        """
        Overridden from Workflow base class.
        """
        return self._applets

    @property
    def imageNameListSlot(self):
        """
        Overridden from Workflow base class.
        """
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def prepareForNewLane(self, laneIndex):
        """
        Overridden from Workflow base class.
        Called immediately before connectLane()
        """
        # No preparation necessary.
        pass

    def connectLane(self, laneIndex):
        """
        Overridden from Workflow base class.
        """
        # Get a *view* of each top-level operator, specific to the current lane.
        opDataSelectionView = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opDataExportView = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        # Now connect the operators together for this lane.
        # Most workflows would have more to do here, but this workflow is super simple:
        # We just connect input to export
        opDataExportView.RawDatasetInfo.connect(opDataSelectionView.DatasetGroup[RAW_DATA_ROLE_INDEX])
        opDataExportView.Inputs.resize(1)
        opDataExportView.Inputs[RAW_DATA_ROLE_INDEX].connect(opDataSelectionView.ImageGroup[RAW_DATA_ROLE_INDEX])
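        # RAW_DATA_ROLE_INDEX is a module-level constant in the original source
        # (presumably 0, matching the single "Input Data" role configured above).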

        # There is no special "raw" display layer in this workflow.
        # opDataExportView.RawData.connect( opDataSelectionView.ImageGroup[0] )

    def handleNewLanesAdded(self):
        """
        Overridden from Workflow base class.
        Called immediately AFTER connectLane() and the dataset is loaded into the workflow.
        """
        # No special handling required.
        pass

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.

        If the user provided command-line arguments, use them to configure
        the workflow inputs and output settings.
        """
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args(self._data_export_args)

        if self._headless and self._batch_input_args and self._data_export_args:
            logger.info("Beginning Batch Processing")
            self.batchProcessingApplet.run_export_from_parsed_args(self._batch_input_args)
            logger.info("Completed Batch Processing")

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.statusUpdateSignal`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0

        opDataExport = self.dataExportApplet.topLevelOperator
        export_data_ready = (
            input_ready
            and len(opDataExport.Inputs[0]) > 0
            and opDataExport.Inputs[0][0].ready()
            and (TinyVector(opDataExport.Inputs[0][0].meta.shape) > 0).all()
        )

        self._shell.setAppletEnabled(self.dataSelectionApplet, not self.batchProcessingApplet.busy)
        self._shell.setAppletEnabled(self.dataExportApplet, export_data_ready and not self.batchProcessingApplet.busy)
        self._shell.setAppletEnabled(self.batchProcessingApplet, export_data_ready)

        # Lastly, check for certain "busy" conditions, during which we
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        busy |= self.batchProcessingApplet.busy
        self._shell.enableProjectChanges(not busy)
Example #33
class WsdtWorkflow(Workflow):
    workflowName = "Watershed Over Distance Transform"
    workflowDescription = "A bare-bones workflow for using the WSDT applet"
    defaultAppletIndex = 0 # show DataSelection by default

    DATA_ROLE_RAW = 0
    DATA_ROLE_PROBABILITIES = 1
    ROLE_NAMES = ['Raw Data', 'Probabilities']
    EXPORT_NAMES = ['Watershed']

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def __init__(self, shell, headless, workflow_cmdline_args, project_creation_workflow, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()

        super(WsdtWorkflow, self).__init__( shell, headless, workflow_cmdline_args, project_creation_workflow, graph=graph, *args, **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data", "Input Data")

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue( self.ROLE_NAMES )

        # -- Wsdt applet
        #
        self.wsdtApplet = WsdtApplet(self, "Watershed", "Wsdt Watershed")

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect( opDataSelection.WorkingDirectory )
        opDataExport.SelectionNames.setValue( self.EXPORT_NAMES )

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(self,
                                                           "Batch Processing",
                                                           self.dataSelectionApplet,
                                                           self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args( workflow_cmdline_args )
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args( unused_args, self.ROLE_NAMES )
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format( unused_args ))

    def connectLane(self, laneIndex):
        """
        Override from base class.
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opWsdt = self.wsdtApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(laneIndex)

        # watershed inputs
        opWsdt.RawData.connect( opDataSelection.ImageGroup[self.DATA_ROLE_RAW] )
        opWsdt.Input.connect( opDataSelection.ImageGroup[self.DATA_ROLE_PROBABILITIES] )

        # DataExport inputs
        opDataExport.RawData.connect( opDataSelection.ImageGroup[self.DATA_ROLE_RAW] )
        opDataExport.RawDatasetInfo.connect( opDataSelection.DatasetGroup[self.DATA_ROLE_RAW] )        
        opDataExport.Inputs.resize( len(self.EXPORT_NAMES) )
        opDataExport.Inputs[0].connect( opWsdt.Superpixels )
        for slot in opDataExport.Inputs:
            assert slot.partner is not None
        
    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args( self._data_export_args )

        if self._headless and self._batch_input_args and self._data_export_args:
            logger.info("Beginning Batch Processing")
            self.batchProcessingApplet.run_export_from_parsed_args(self._batch_input_args)
            logger.info("Completed Batch Processing")

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested`
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataExport = self.dataExportApplet.topLevelOperator
        opWsdt = self.wsdtApplet.topLevelOperator

        # If no data, nothing else is ready.
        input_ready = len(opDataSelection.ImageGroup) > 0 and not self.dataSelectionApplet.busy

        # The user isn't allowed to touch anything while batch processing is running.
        batch_processing_busy = self.batchProcessingApplet.busy

        self._shell.setAppletEnabled( self.dataSelectionApplet,   not batch_processing_busy )
        self._shell.setAppletEnabled( self.wsdtApplet,            not batch_processing_busy and input_ready )
        self._shell.setAppletEnabled( self.dataExportApplet,      not batch_processing_busy and input_ready and opWsdt.Superpixels.ready())
        self._shell.setAppletEnabled( self.batchProcessingApplet, not batch_processing_busy and input_ready )

        # Lastly, check for certain "busy" conditions, during which we
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.wsdtApplet.busy
        busy |= self.dataExportApplet.busy
        busy |= self.batchProcessingApplet.busy
        self._shell.enableProjectChanges( not busy )
class DataConversionWorkflow(Workflow):
    """
    Simple workflow for converting data between formats.  Has only two applets: Data Selection and Data Export.
    
    Also supports a command-line interface for headless mode.
    
    For example:
    
    .. code-block:: bash

        python ilastik.py --headless --new_project=NewTemporaryProject.ilp --workflow=DataConversionWorkflow --output_format="png sequence" ~/input1.h5 ~/input2.h5
    
    Or if you have an existing project with input files already selected and configured:

    .. code-block:: bash

        python ilastik.py --headless --project=MyProject.ilp --output_format=jpeg
    
    .. note:: Beware of issues related to absolute vs. relative paths.  Relative links are stored relative to the project file.
              To avoid this issue entirely, either 
                 (1) use only absolute filepaths
              or (2) cd into your project file's directory before launching ilastik.
    
    """
    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_args, *args, **kwargs):

        # Create a graph to be shared by all operators
        graph = Graph()
        super(DataConversionWorkflow, self).__init__(shell,
                                                     headless,
                                                     workflow_cmdline_args,
                                                     project_creation_args,
                                                     graph=graph,
                                                     *args,
                                                     **kwargs)
        self._applets = []

        # Create applets
        self.dataSelectionApplet = DataSelectionApplet(
            self,
            "Input Data",
            "Input Data",
            supportIlastik05Import=True,
            batchDataGui=False,
            force5d=False)

        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetRoles.setValue(["Input Data"])

        self.dataExportApplet = DataExportApplet(self, "Data Export")

        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(["Input"])

        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.dataExportApplet)

        # Parse command-line arguments
        # Command-line args are applied in onProjectLoaded(), below.
        self._workflow_cmdline_args = workflow_cmdline_args
        self._data_input_args = None
        self._data_export_args = None
        if workflow_cmdline_args:
            self._data_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args(
                workflow_cmdline_args)
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(
                unused_args)
            if unused_args:
                logger.warn("Unused command-line args: {}".format(unused_args))

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the batch data selection operator.
        if self._data_input_args and self._data_input_args.input_files:
            self.dataSelectionApplet.configure_operator_with_parsed_args(
                self._data_input_args)

        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args(
                self._data_export_args)

        if self._headless and self._data_input_args and self._data_export_args:
            # Now run the export and report progress....
            opDataExport = self.dataExportApplet.topLevelOperator
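            # Iterate over the export operator's per-lane views and run each
            # export, echoing percent-complete updates to stdout.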
            for i, opExportDataLaneView in enumerate(opDataExport):
                logger.info("Exporting file #{} to {}".format(
                    i, opExportDataLaneView.ExportPath.value))

                sys.stdout.write("Result #{}/{} Progress: ".format(
                    i, len(opDataExport)))

                def print_progress(progress):
                    sys.stdout.write("{} ".format(progress))

                # If the operator provides a progress signal, use it.
                slotProgressSignal = opExportDataLaneView.progressSignal
                slotProgressSignal.subscribe(print_progress)
                opExportDataLaneView.run_export()

                # Finished.
                sys.stdout.write("\n")

    def connectLane(self, laneIndex):
        opDataSelectionView = self.dataSelectionApplet.topLevelOperator.getLane(
            laneIndex)
        opDataExportView = self.dataExportApplet.topLevelOperator.getLane(
            laneIndex)

        opDataExportView.RawDatasetInfo.connect(
            opDataSelectionView.DatasetGroup[0])
        opDataExportView.Inputs.resize(1)
        opDataExportView.Inputs[0].connect(opDataSelectionView.ImageGroup[0])

        # There is no special "raw" display layer in this workflow.
        #opDataExportView.RawData.connect( opDataSelectionView.ImageGroup[0] )

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class.
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested` signal.
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        input_ready = len(opDataSelection.ImageGroup) > 0

        opDataExport = self.dataExportApplet.topLevelOperator
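        # Export is ready once the first input slot is populated and its
        # image has a valid (non-degenerate) shape.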
        export_data_ready = input_ready and \
                            len(opDataExport.Inputs[0]) > 0 and \
                            opDataExport.Inputs[0][0].ready() and \
                            (TinyVector(opDataExport.Inputs[0][0].meta.shape) > 0).all()

        self._shell.setAppletEnabled(self.dataExportApplet, export_data_ready)

        # Lastly, check for certain "busy" conditions, during which we
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.dataExportApplet.busy
        self._shell.enableProjectChanges(not busy)
Example #35
class WsdtWorkflow(Workflow):
    workflowName = "Watershed Over Distance Transform"
    workflowDescription = "A bare-bones workflow for using the WSDT applet"
    defaultAppletIndex = 0  # show DataSelection by default

    DATA_ROLE_RAW = 0
    DATA_ROLE_PROBABILITIES = 1
    ROLE_NAMES = ['Raw Data', 'Probabilities']
    EXPORT_NAMES = ['Watershed']

    @property
    def applets(self):
        return self._applets

    @property
    def imageNameListSlot(self):
        return self.dataSelectionApplet.topLevelOperator.ImageName

    def __init__(self, shell, headless, workflow_cmdline_args,
                 project_creation_workflow, *args, **kwargs):
        # Create a graph to be shared by all operators
        graph = Graph()

        super(WsdtWorkflow, self).__init__(shell,
                                           headless,
                                           workflow_cmdline_args,
                                           project_creation_workflow,
                                           graph=graph,
                                           *args,
                                           **kwargs)
        self._applets = []

        # -- DataSelection applet
        #
        self.dataSelectionApplet = DataSelectionApplet(self, "Input Data",
                                                       "Input Data")

        # Dataset inputs
        opDataSelection = self.dataSelectionApplet.topLevelOperator
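        # Each lane needs two inputs: the raw image and a probability map.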
        opDataSelection.DatasetRoles.setValue(self.ROLE_NAMES)

        # -- Wsdt applet
        #
        self.wsdtApplet = WsdtApplet(self, "Watershed", "Wsdt Watershed")

        # -- DataExport applet
        #
        self.dataExportApplet = DataExportApplet(self, "Data Export")

        # Configure global DataExport settings
        opDataExport = self.dataExportApplet.topLevelOperator
        opDataExport.WorkingDirectory.connect(opDataSelection.WorkingDirectory)
        opDataExport.SelectionNames.setValue(self.EXPORT_NAMES)

        # -- BatchProcessing applet
        #
        self.batchProcessingApplet = BatchProcessingApplet(
            self, "Batch Processing", self.dataSelectionApplet,
            self.dataExportApplet)

        # -- Expose applets to shell
        self._applets.append(self.dataSelectionApplet)
        self._applets.append(self.wsdtApplet)
        self._applets.append(self.dataExportApplet)
        self._applets.append(self.batchProcessingApplet)

        # -- Parse command-line arguments
        #    (Command-line args are applied in onProjectLoaded(), below.)
        if workflow_cmdline_args:
            self._data_export_args, unused_args = self.dataExportApplet.parse_known_cmdline_args(
                workflow_cmdline_args)
            self._batch_input_args, unused_args = self.dataSelectionApplet.parse_known_cmdline_args(
                unused_args, self.ROLE_NAMES)
        else:
            unused_args = None
            self._batch_input_args = None
            self._data_export_args = None

        if unused_args:
            logger.warning("Unused command-line args: {}".format(unused_args))

    def connectLane(self, laneIndex):
        """
        Override from base class.
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator.getLane(
            laneIndex)
        opWsdt = self.wsdtApplet.topLevelOperator.getLane(laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(
            laneIndex)

        # watershed inputs
        opWsdt.RawData.connect(opDataSelection.ImageGroup[self.DATA_ROLE_RAW])
        opWsdt.Input.connect(
            opDataSelection.ImageGroup[self.DATA_ROLE_PROBABILITIES])

        # DataExport inputs
        opDataExport.RawData.connect(
            opDataSelection.ImageGroup[self.DATA_ROLE_RAW])
        opDataExport.RawDatasetInfo.connect(
            opDataSelection.DatasetGroup[self.DATA_ROLE_RAW])
        opDataExport.Inputs.resize(len(self.EXPORT_NAMES))
        opDataExport.Inputs[0].connect(opWsdt.Superpixels)
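        # Sanity check: every export slot must have an upstream connection.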
        for slot in opDataExport.Inputs:
            assert slot.upstream_slot is not None

    def onProjectLoaded(self, projectManager):
        """
        Overridden from Workflow base class.  Called by the Project Manager.
        
        If the user provided command-line arguments, use them to configure 
        the workflow inputs and output settings.
        """
        # Configure the data export operator.
        if self._data_export_args:
            self.dataExportApplet.configure_operator_with_parsed_args(
                self._data_export_args)

        if self._headless and self._batch_input_args and self._data_export_args:
            logger.info("Beginning Batch Processing")
            self.batchProcessingApplet.run_export_from_parsed_args(
                self._batch_input_args)
            logger.info("Completed Batch Processing")

    def handleAppletStateUpdateRequested(self):
        """
        Overridden from Workflow base class.
        Called when an applet has fired the :py:attr:`Applet.appletStateUpdateRequested` signal.
        """
        opDataSelection = self.dataSelectionApplet.topLevelOperator
        opDataExport = self.dataExportApplet.topLevelOperator
        opWsdt = self.wsdtApplet.topLevelOperator

        # If no data, nothing else is ready.
        input_ready = len(opDataSelection.ImageGroup) > 0 and not self.dataSelectionApplet.busy

        # The user isn't allowed to touch anything while batch processing is running.
        batch_processing_busy = self.batchProcessingApplet.busy

        self._shell.setAppletEnabled(self.dataSelectionApplet,
                                     not batch_processing_busy)
        self._shell.setAppletEnabled(self.wsdtApplet,
                                     not batch_processing_busy and input_ready)
        self._shell.setAppletEnabled(self.dataExportApplet,
                                     not batch_processing_busy and input_ready
                                     and opWsdt.Superpixels.ready())
        self._shell.setAppletEnabled(self.batchProcessingApplet,
                                     not batch_processing_busy and input_ready)

        # Lastly, check for certain "busy" conditions, during which we
        #  should prevent the shell from closing the project.
        busy = False
        busy |= self.dataSelectionApplet.busy
        busy |= self.wsdtApplet.busy
        busy |= self.dataExportApplet.busy
        busy |= self.batchProcessingApplet.busy
        self._shell.enableProjectChanges(not busy)