Code Example #1
A test helper that builds a pixel-classification project headlessly: create a blank .ilp, open it, add a dataset, choose features, paint two blocks of labels directly on the operator, and save.
    @classmethod
    def create_new_tst_project(cls):
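        # Assumed module-level imports, not shown in this snippet: numpy,
        # HeadlessShell, ProjectManager, PixelClassificationWorkflow, and the
        # slicing helpers sl / slicing2shape (from lazyflow's slicingtools).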
        # Instantiate 'shell'
        shell = HeadlessShell()
        
        # Create a blank project file and load it.
        newProjectFilePath = cls.PROJECT_FILE
        newProjectFile = ProjectManager.createBlankProjectFile(newProjectFilePath, PixelClassificationWorkflow, [])
        newProjectFile.close()
        shell.openProjectFile(newProjectFilePath)
        workflow = shell.workflow
        
        # Add a file
        from ilastik.applets.dataSelection.opDataSelection import DatasetInfo
        info = DatasetInfo()
        info.filePath = cls.SAMPLE_DATA
        opDataSelection = workflow.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetGroup.resize(1)
        opDataSelection.DatasetGroup[0][0].setValue(info)
        
        
        # Set some features
        ScalesList = [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0]
        FeatureIds = [ 'GaussianSmoothing',
                       'LaplacianOfGaussian',
                       'StructureTensorEigenvalues',
                       'HessianOfGaussianEigenvalues',
                       'GaussianGradientMagnitude',
                       'DifferenceOfGaussians' ]

        opFeatures = workflow.featureSelectionApplet.topLevelOperator
        opFeatures.Scales.setValue( ScalesList )
        opFeatures.FeatureIds.setValue( FeatureIds )

        #                    sigma:   0.3    0.7    1.0    1.6    3.5    5.0   10.0
        selections = numpy.array( [[True, False, False, False, False, False, False],
                                   [True, False, False, False, False, False, False],
                                   [True, False, False, False, False, False, False],
                                   [False, False, False, False, False, False, False],
                                   [False, False, False, False, False, False, False],
                                   [False, False, False, False, False, False, False]] )
        opFeatures.SelectionMatrix.setValue(selections)
    
        # Add some labels directly to the operator
        opPixelClass = workflow.pcApplet.topLevelOperator

        opPixelClass.LabelNames.setValue(['Label 1', 'Label 2'])

        slicing1 = sl[0:1,0:10,0:10,0:1,0:1]
        labels1 = 1 * numpy.ones(slicing2shape(slicing1), dtype=numpy.uint8)
        opPixelClass.LabelInputs[0][slicing1] = labels1

        slicing2 = sl[0:1,0:10,10:20,0:1,0:1]
        labels2 = 2 * numpy.ones(slicing2shape(slicing2), dtype=numpy.uint8)
        opPixelClass.LabelInputs[0][slicing2] = labels2

        # Save and close
        shell.projectManager.saveProject()
        del shell
Code Example #2
A PEP 8-wrapped variant of example #1. Note that it omits the LabelNames step present in example #1, so the two label values never get display names.
    @classmethod
    def create_new_tst_project(cls):
        # Instantiate 'shell'
        shell = HeadlessShell()

        # Create a blank project file and load it.
        newProjectFilePath = cls.PROJECT_FILE
        newProjectFile = ProjectManager.createBlankProjectFile(
            newProjectFilePath, PixelClassificationWorkflow, [])
        newProjectFile.close()
        shell.openProjectFile(newProjectFilePath)
        workflow = shell.workflow

        # Add a file
        from ilastik.applets.dataSelection.opDataSelection import DatasetInfo
        info = DatasetInfo()
        info.filePath = cls.SAMPLE_DATA
        opDataSelection = workflow.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetGroup.resize(1)
        opDataSelection.DatasetGroup[0][0].setValue(info)

        # Set some features
        ScalesList = [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0]
        FeatureIds = [
            'GaussianSmoothing', 'LaplacianOfGaussian',
            'StructureTensorEigenvalues', 'HessianOfGaussianEigenvalues',
            'GaussianGradientMagnitude', 'DifferenceOfGaussians'
        ]

        opFeatures = workflow.featureSelectionApplet.topLevelOperator
        opFeatures.Scales.setValue(ScalesList)
        opFeatures.FeatureIds.setValue(FeatureIds)

        #                    sigma:   0.3    0.7    1.0    1.6    3.5    5.0   10.0
        selections = numpy.array(
            [[True, False, False, False, False, False, False],
             [True, False, False, False, False, False, False],
             [True, False, False, False, False, False, False],
             [False, False, False, False, False, False, False],
             [False, False, False, False, False, False, False],
             [False, False, False, False, False, False, False]])
        opFeatures.SelectionMatrix.setValue(selections)

        # Add some labels directly to the operator
        opPixelClass = workflow.pcApplet.topLevelOperator

        slicing1 = sl[0:1, 0:10, 0:10, 0:1, 0:1]
        labels1 = 1 * numpy.ones(slicing2shape(slicing1), dtype=numpy.uint8)
        opPixelClass.LabelInputs[0][slicing1] = labels1

        slicing2 = sl[0:1, 0:10, 10:20, 0:1, 0:1]
        labels2 = 2 * numpy.ones(slicing2shape(slicing2), dtype=numpy.uint8)
        opPixelClass.LabelInputs[0][slicing2] = labels2

        # Save and close
        shell.projectManager.saveProject()
        del shell
Code Example #3
A small helper that instantiates the given workflow class, wraps it in a HeadlessShell, and registers each of the workflow's applets with the shell.
def startShellHeadless(workflowClass):
    """
    Start the ilastik shell in "headless" mode with the given workflow type.
    
    workflowClass - the type of workflow to instantiate for the shell.
    
    Returns the shell and workflow instance.
    """
    # Create workflow
    workflow = workflowClass()
    
    # Create the shell and populate it
    shell = HeadlessShell()
    for app in workflow.applets:
        shell.addApplet(app)

    # For now, headless shell doesn't do anything that requires knowledge of image names.
    #shell.setImageNameListSlot( workflow.imageNameListSlot )

    return (shell, workflow)
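
A minimal usage sketch for the helper above, assuming PixelClassificationWorkflow is importable and startShellHeadless is defined as shown (the printout is illustrative):

# Hedged usage sketch; not part of the original source.
from ilastik.workflows.pixelClassification.pixelClassificationWorkflow import PixelClassificationWorkflow

shell, workflow = startShellHeadless(PixelClassificationWorkflow)
for applet in workflow.applets:
    print(applet.name)  # list the applets registered with the shell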
Code Example #4
An ilastik main() entry point: run a chain of initialization hooks, then launch either a HeadlessShell or the GUI shell depending on parsed_args.headless.
def main( parsed_args, workflow_cmdline_args=[] ):
    this_path = os.path.dirname(__file__)
    ilastik_dir = os.path.abspath(os.path.join(this_path, "..%s.." % os.path.sep))
    
    # If necessary, redirect stdout BEFORE logging is initialized
    _redirect_output( parsed_args )
    _init_logging( parsed_args ) # Initialize logging before anything else

    _init_configfile( parsed_args )
    
    _clean_paths( parsed_args, ilastik_dir )
    _update_debug_mode( parsed_args )
    _init_threading_monkeypatch()
    _validate_arg_compatibility( parsed_args )

    # Extra initialization functions.
    # Called during app startup.
    init_funcs = []
    lazyflow_config_fn = _prepare_lazyflow_config( parsed_args )
    if lazyflow_config_fn:
        init_funcs.append( lazyflow_config_fn )    

    load_fn = _prepare_auto_open_project( parsed_args )
    if load_fn:
        init_funcs.append( load_fn )    
    
    create_fn = _prepare_auto_create_new_project( parsed_args )
    if create_fn:
        init_funcs.append( create_fn )

    _enable_faulthandler()
    _init_excepthooks( parsed_args )
    eventcapture_mode, playback_args = _prepare_test_recording_and_playback( parsed_args )    

    if ilastik_config.getboolean("ilastik", "debug"):
        message = 'Starting ilastik in debug mode from "%s".' % ilastik_dir
    else:
        message = 'Starting ilastik from "%s".' % ilastik_dir
    logger.info(message)
    print(message)  # always print the startup message
    
    # Headless launch
    if parsed_args.headless:
        from ilastik.shell.headless.headlessShell import HeadlessShell
        shell = HeadlessShell( workflow_cmdline_args )
        for f in init_funcs:
            f(shell)
        return shell
    # Normal launch
    else:
        from ilastik.shell.gui.startShellGui import startShellGui
        sys.exit(startShellGui(workflow_cmdline_args, eventcapture_mode, playback_args, *init_funcs))
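
A hedged sketch of driving this entry point from a script; only main()'s signature and the headless branch are taken from the code above, while the module name ilastik_main and its parser attribute are hypothetical:

# Hypothetical driver; ilastik_main and its parser attribute are illustrative
# names, not a confirmed ilastik API.
import ilastik_main

parsed_args = ilastik_main.parser.parse_args(["--headless"])
shell = ilastik_main.main(parsed_args)  # headless mode returns the shell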
Code Example #5
A headless batch-prediction driver: validate the project file and batch inputs, open the project in a HeadlessShell, then generate project and/or batch predictions.
def runWorkflow(parsed_args):
    args = parsed_args
    
    # Use a temporary cache dir
    if args.stack_volume_cache_dir is None:
        args.stack_volume_cache_dir = tempfile.gettempdir()
    
    # Make sure project file exists.
    if not os.path.exists(args.project):
        raise RuntimeError("Project file '" + args.project + "' does not exist.")

    # Make sure batch inputs exist.
    for p in args.batch_inputs:
        error = False
        p = PathComponents(p).externalPath
        if '*' in p:
            if len(glob.glob(p)) == 0:
                logger.error("Could not find any files for globstring: {}".format(p))
                logger.error("Check your quotes!")
                error = True
        elif not os.path.exists(p):
            logger.error("Batch input file does not exist: " + p)
            error = True
        if error:
            raise RuntimeError("Could not find one or more batch inputs.  See logged errors.")

    # Instantiate 'shell'
    shell = HeadlessShell( functools.partial(PixelClassificationWorkflow, appendBatchOperators=True) )
    
    if args.assume_old_ilp_axes:
        # Special hack for Janelia: 
        # In some old versions of 0.5, the data was stored in tyxzc order.
        # We have no way of inspecting the data to determine this, so we allow 
        #  users to specify that their ilp is very old using the 
        #  assume_old_ilp_axes command-line flag
        ilastik.utility.globals.ImportOptions.default_axis_order = 'tyxzc'

    # Load project (auto-import it if necessary)
    logger.info("Opening project: '" + args.project + "'")
    shell.openProjectPath(args.project)

    try:
        if not args.generate_project_predictions and len(args.batch_inputs) == 0:
            logger.error("Command-line arguments didn't specify any classification jobs.")
        else:
            # Predictions for project input datasets
            if args.generate_project_predictions:
                generateProjectPredictions(shell)
        
            # Predictions for other datasets ('batch datasets')
            result = True
            if len(args.batch_inputs) > 0:
                result = generateBatchPredictions(shell.workflow,
                                                  args.batch_inputs,
                                                  args.batch_export_dir,
                                                  args.batch_output_suffix,
                                                  args.batch_output_dataset_name,
                                                  args.stack_volume_cache_dir)
                assert result
    finally:
        logger.info("Closing project...")
        shell.closeCurrentProject()

    logger.info("FINISHED.")
Code Example #6
File: app.py  Project: syaffa/ilastik
A later main() variant that splits initialization into pre-init functions (run before the shell exists) and post-init functions (called with the new shell as argument).
def main(parsed_args, workflow_cmdline_args=[], init_logging=True):
    """
    init_logging: Skip logging config initialization by setting this to False.
                  (Useful when opening multiple projects in a Python script.)
    """
    this_path = os.path.dirname(__file__)
    ilastik_dir = os.path.abspath(
        os.path.join(this_path, "..%s.." % os.path.sep))
    _import_h5py_with_utf8_encoding()
    _update_debug_mode(parsed_args)
    _update_hbp_mode(parsed_args)

    # If necessary, redirect stdout BEFORE logging is initialized
    _redirect_output(parsed_args)

    if init_logging:
        _init_logging(parsed_args)  # Initialize logging before anything else

    _init_configfile(parsed_args)

    _init_threading_logging_monkeypatch()

    _init_preferences()

    # Extra initialization functions.
    # These are called during app startup, but before the shell is created.
    preinit_funcs = []
    # Must be first (or at least before vigra).
    preinit_funcs.append(_import_opengm)

    lazyflow_config_fn = _prepare_lazyflow_config(parsed_args)
    if lazyflow_config_fn:
        preinit_funcs.append(lazyflow_config_fn)

    # More initialization functions.
    # These will be called AFTER the shell is created.
    # The shell is provided as a parameter to the function.
    postinit_funcs = []
    load_fn = _prepare_auto_open_project(parsed_args)
    if load_fn:
        postinit_funcs.append(load_fn)

    create_fn = _prepare_auto_create_new_project(parsed_args)
    if create_fn:
        postinit_funcs.append(create_fn)

    faulthandler.enable()
    _init_excepthooks(parsed_args)

    if ilastik_config.getboolean("ilastik", "debug"):
        message = 'Starting ilastik in debug mode from "%s".' % ilastik_dir
        logging.basicConfig(level=logging.DEBUG)
        logger.info(message)
        print(message)  # always print the startup message
    else:
        message = 'Starting ilastik from "%s".' % ilastik_dir
        logger.info(message)
        print(message)  # always print the startup message

    # Headless launch
    if parsed_args.headless:
        # Run pre-init
        for f in preinit_funcs:
            f()

        from ilastik.shell.headless.headlessShell import HeadlessShell

        shell = HeadlessShell(workflow_cmdline_args)

        # Run post-init
        for f in postinit_funcs:
            f(shell)
        return shell
    # Normal launch
    else:
        from ilastik.shell.gui.startShellGui import startShellGui

        sys.exit(
            startShellGui(workflow_cmdline_args, preinit_funcs,
                          postinit_funcs))
Code Example #7
Like example #1, but uses FilesystemDatasetInfo and additionally trains the classifier, by unfreezing predictions and pulling the Classifier slot, before saving.
    @classmethod
    def create_new_project(cls, project_file_path, dataset_path):
        # Instantiate 'shell'
        shell = HeadlessShell()

        # Create a blank project file and load it.
        newProjectFile = ProjectManager.createBlankProjectFile(project_file_path, PixelClassificationWorkflow, [])
        newProjectFile.close()
        shell.openProjectFile(project_file_path)
        workflow = shell.workflow

        # Add a file
        from ilastik.applets.dataSelection.opDataSelection import FilesystemDatasetInfo

        info = FilesystemDatasetInfo(filePath=dataset_path)
        opDataSelection = workflow.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetGroup.resize(1)
        opDataSelection.DatasetGroup[0][0].setValue(info)

        # Set some features
        ScalesList = [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0]
        FeatureIds = [
            "GaussianSmoothing",
            "LaplacianOfGaussian",
            "StructureTensorEigenvalues",
            "HessianOfGaussianEigenvalues",
            "GaussianGradientMagnitude",
            "DifferenceOfGaussians",
        ]

        opFeatures = workflow.featureSelectionApplet.topLevelOperator
        opFeatures.Scales.setValue(ScalesList)
        opFeatures.FeatureIds.setValue(FeatureIds)

        #                    sigma:   0.3    0.7    1.0    1.6    3.5    5.0   10.0
        selections = numpy.array(
            [
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
            ]
        )
        opFeatures.SelectionMatrix.setValue(selections)

        # Add some labels directly to the operator
        opPixelClass = workflow.pcApplet.topLevelOperator

        opPixelClass.LabelNames.setValue(["Label 1", "Label 2"])

        slicing1 = sl[0:1, 0:10, 0:10, 0:1, 0:1]
        labels1 = 1 * numpy.ones(slicing2shape(slicing1), dtype=numpy.uint8)
        opPixelClass.LabelInputs[0][slicing1] = labels1

        slicing2 = sl[0:1, 0:10, 10:20, 0:1, 0:1]
        labels2 = 2 * numpy.ones(slicing2shape(slicing2), dtype=numpy.uint8)
        opPixelClass.LabelInputs[0][slicing2] = labels2

        # Train the classifier
        opPixelClass.FreezePredictions.setValue(False)
        _ = opPixelClass.Classifier.value

        # Save and close
        shell.projectManager.saveProject()
        del shell
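
A possible follow-up inside the same method, once the classifier above has been trained and before the shell is discarded; the PredictionProbabilities slot name and the [:].wait() request idiom are assumptions from typical lazyflow usage, not taken from this snippet:

        # Hedged sketch: pull the full per-pixel probability volume.
        # The slot name is an assumption; adjust to the operator's real interface.
        predictions = opPixelClass.PredictionProbabilities[0][:].wait()
        print(predictions.shape)  # last axis: one channel per label class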
Code Example #8
A cluster-mode driver: one entry point serves both the master process and the node workers, switching on args._node_work_.
def runWorkflow(parsed_args):
    args = parsed_args

    # Read the config file
    configFilePath = args.option_config_file
    config = parseClusterConfigFile( configFilePath )

    # If we've got a process name, re-initialize the logger from scratch
    task_name = "node"
    if args.process_name is not None:
        task_name = args.process_name
        ilastik.ilastik_logging.default_config.init(args.process_name + ' ')

    rootLogHandler = None
    if args._node_work_ is None:
        # This is the master process.
        # Tee the log to a file for future reference.

        # Output log directory might be a relative path (relative to config file)
        absLogDir, relLogDir = getPathVariants(config.output_log_directory, os.path.split( configFilePath )[0] )
        if not os.path.exists(absLogDir):
            os.mkdir(absLogDir)

        # Copy the config we're using to the output directory
        shutil.copy(configFilePath, absLogDir)
        
        logFile = os.path.join( absLogDir, "MASTER.log" )
        logFileFormatter = logging.Formatter("%(levelname)s %(name)s: %(message)s")
        rootLogHandler = logging.FileHandler(logFile, 'a')
        rootLogHandler.setFormatter(logFileFormatter)
        rootLogger = logging.getLogger()
        rootLogger.addHandler( rootLogHandler )
        logger.info( "Launched with sys.argv: {}".format( sys.argv ) )

    # Update the monkey_patch settings
    ilastik.utility.monkey_patches.apply_setting_dict( config.__dict__ )

    # If we're running a node job, set the threadpool size if the user specified one.
    # Note that the main thread does not count toward the threadpool total.
    if args._node_work_ is not None and config.task_threadpool_size is not None:
        lazyflow.request.Request.reset_thread_pool( num_workers = config.task_threadpool_size )

    # Make sure project file exists.
    if not os.path.exists(args.project):
        raise RuntimeError("Project file '" + args.project + "' does not exist.")

    # Instantiate 'shell'
    shell = HeadlessShell( functools.partial(Workflow.getSubclass(config.workflow_type) ) )
    
    # Load project (auto-import it if necessary)
    logger.info("Opening project: '" + args.project + "'")
    shell.openProjectPath(args.project)

    workflow = shell.projectManager.workflow
            
    # Attach cluster operators
    resultSlot = None
    finalOutputSlot = workflow.getHeadlessOutputSlot( config.output_slot_id )
    assert finalOutputSlot is not None

    secondaryOutputSlots = workflow.getSecondaryHeadlessOutputSlots( config.output_slot_id )
    secondaryOutputDescriptions = args.secondary_output_description_file # This is a list (see 'action' above)
    if len(secondaryOutputDescriptions) != len(secondaryOutputSlots):
        raise RuntimeError( "This workflow produces exactly {} SECONDARY outputs.  You provided {}.".format( len(secondaryOutputSlots), len(secondaryOutputDescriptions) ) )
    
    clusterOperator = None
    try:
        if args._node_work_ is not None:
            # We're doing node work
            opClusterTaskWorker = OperatorWrapper( OpTaskWorker, parent=finalOutputSlot.getRealOperator().parent )

            # FIXME: Image index is hard-coded as 0.  We assume we are working with only one (big) dataset in cluster mode.            
            opClusterTaskWorker.Input.connect( finalOutputSlot )
            opClusterTaskWorker.RoiString[0].setValue( args._node_work_ )
            opClusterTaskWorker.TaskName.setValue( task_name )
            opClusterTaskWorker.ConfigFilePath.setValue( args.option_config_file )

            # Configure optional slots first for efficiency (avoid multiple calls to setupOutputs)
            opClusterTaskWorker.SecondaryInputs[0].resize( len( secondaryOutputSlots ) )
            opClusterTaskWorker.SecondaryOutputDescriptions[0].resize( len( secondaryOutputSlots ) )
            for i in range( len(secondaryOutputSlots) ):
                opClusterTaskWorker.SecondaryInputs[0][i].connect( secondaryOutputSlots[i][0] )
                opClusterTaskWorker.SecondaryOutputDescriptions[0][i].setValue( secondaryOutputDescriptions[i] )

            opClusterTaskWorker.OutputFilesetDescription.setValue( args.output_description_file )
    
            # If we have a way to report task progress (e.g. by updating the job name),
            #  then subscribe to progress signals
            if config.task_progress_update_command is not None:
                def report_progress( progress ):
                    cmd = config.task_progress_update_command.format( progress=int(progress) )
                    def shell_call(shell_cmd):
                        logger.debug( "Executing progress command: " + cmd )
                        subprocess.call( shell_cmd, shell=True )
                    background_tasks.put( functools.partial( shell_call, cmd ) )
                opClusterTaskWorker.innerOperators[0].progressSignal.subscribe( report_progress )
            
            resultSlot = opClusterTaskWorker.ReturnCode
            clusterOperator = opClusterTaskWorker
        else:
            # We're the master
            opClusterizeMaster = OperatorWrapper( OpClusterize, parent=finalOutputSlot.getRealOperator().parent )

            opClusterizeMaster.Input.connect( finalOutputSlot )
            opClusterizeMaster.ProjectFilePath.setValue( args.project )
            opClusterizeMaster.OutputDatasetDescription.setValue( args.output_description_file )

            # Configure optional slots first for efficiency (avoid multiple calls to setupOutputs)
            opClusterizeMaster.SecondaryInputs[0].resize( len( secondaryOutputSlots ) )
            opClusterizeMaster.SecondaryOutputDescriptions[0].resize( len( secondaryOutputSlots ) )
            for i in range( len(secondaryOutputSlots) ):
                opClusterizeMaster.SecondaryInputs[0][i].connect( secondaryOutputSlots[i][0] )
                opClusterizeMaster.SecondaryOutputDescriptions[0][i].setValue( secondaryOutputDescriptions[i] )    

            opClusterizeMaster.ConfigFilePath.setValue( args.option_config_file )

            resultSlot = opClusterizeMaster.ReturnCode
            clusterOperator = opClusterizeMaster
        
        # Get the result
        logger.info("Starting task")
        result = resultSlot[0].value # FIXME: The image index is hard-coded here.
    finally:
        logger.info("Cleaning up")
        global stop_background_tasks
        stop_background_tasks = True
        
        try:
            if clusterOperator is not None:
                clusterOperator.cleanUp()
        except:
            logger.error("Errors during cleanup.")

        try:
            logger.info("Closing project...")
            shell.closeCurrentProject()
        except:
            logger.error("Errors while closing project.")
    
    logger.info("FINISHED with result {}".format(result))
    if not result:
        logger.error( "FAILED TO COMPLETE!" )

    if rootLogHandler is not None:
        rootLogHandler.close()
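
Both branches above converge on the same pattern: each leaves its operator's ReturnCode slot in resultSlot, the value is pulled once inside the try block, and the finally block stops background tasks and tears down the cluster operator and the project regardless of outcome.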
Code Example #9
A main() variant that, in headless mode, prepends a directory of dummy stand-in modules to sys.path so that any accidental GUI import fails immediately with an import error.
def main( parsed_args, workflow_cmdline_args=[] ):
    this_path = os.path.dirname(__file__)
    ilastik_dir = os.path.abspath(os.path.join(this_path, "..%s.." % os.path.sep))
    _update_debug_mode( parsed_args )
    
    # If necessary, redirect stdout BEFORE logging is initialized
    _redirect_output( parsed_args )
    _init_logging( parsed_args ) # Initialize logging before anything else

    _init_configfile( parsed_args )
    
    _init_threading_logging_monkeypatch()
    _init_threading_h5py_monkeypatch()
    _validate_arg_compatibility( parsed_args )

    # Extra initialization functions.
    # These are called during app startup, but before the shell is created.
    preinit_funcs = []
    preinit_funcs.append( _import_opengm ) # Must be first (or at least before vigra).
    
    lazyflow_config_fn = _prepare_lazyflow_config( parsed_args )
    if lazyflow_config_fn:
        preinit_funcs.append( lazyflow_config_fn )

    # More initialization functions.
    # These will be called AFTER the shell is created.
    # The shell is provided as a parameter to the function.
    postinit_funcs = []
    load_fn = _prepare_auto_open_project( parsed_args )
    if load_fn:
        postinit_funcs.append( load_fn )
    
    create_fn = _prepare_auto_create_new_project( parsed_args )
    if create_fn:
        postinit_funcs.append( create_fn )

    _enable_faulthandler()
    _init_excepthooks( parsed_args )
    eventcapture_mode, playback_args = _prepare_test_recording_and_playback( parsed_args )    

    if ilastik_config.getboolean("ilastik", "debug"):
        message = 'Starting ilastik in debug mode from "%s".' % ilastik_dir
    else:
        message = 'Starting ilastik from "%s".' % ilastik_dir
    logger.info(message)
    print(message)  # always print the startup message
    
    # Headless launch
    if parsed_args.headless:
        # If any applet imports the GUI in headless mode, that's a mistake.
        # To help developers catch such mistakes, we replace PyQt with a dummy module, so we'll see import errors.
        import ilastik
        dummy_module_dir = os.path.join( os.path.split(ilastik.__file__)[0], "headless_dummy_modules" )
        sys.path.insert(0, dummy_module_dir)

        # Run pre-init
        for f in preinit_funcs:
            f()
        
        from ilastik.shell.headless.headlessShell import HeadlessShell
        shell = HeadlessShell( workflow_cmdline_args )

        # Run post-init
        for f in postinit_funcs:
            f(shell)
        return shell
    # Normal launch
    else:
        from ilastik.shell.gui.startShellGui import startShellGui
        sys.exit(startShellGui(workflow_cmdline_args, eventcapture_mode, playback_args, preinit_funcs, postinit_funcs))
Code Example #10
A PEP 8-wrapped variant of example #5. Note it opens the project with openProjectFile rather than openProjectPath.
def runWorkflow(parsed_args):
    args = parsed_args

    # Use a temporary cache dir
    if args.stack_volume_cache_dir is None:
        args.stack_volume_cache_dir = tempfile.gettempdir()

    # Make sure project file exists.
    if not os.path.exists(args.project):
        raise RuntimeError("Project file '" + args.project +
                           "' does not exist.")

    # Make sure batch inputs exist.
    for p in args.batch_inputs:
        error = False
        p = PathComponents(p).externalPath
        if '*' in p:
            if len(glob.glob(p)) == 0:
                logger.error(
                    "Could not find any files for globstring: {}".format(p))
                logger.error("Check your quotes!")
                error = True
        elif not os.path.exists(p):
            logger.error("Batch input file does not exist: " + p)
            error = True
        if error:
            raise RuntimeError(
                "Could not find one or more batch inputs.  See logged errors.")

    # Instantiate 'shell'
    shell = HeadlessShell(
        functools.partial(PixelClassificationWorkflow,
                          appendBatchOperators=True))

    if args.assume_old_ilp_axes:
        # Special hack for Janelia:
        # In some old versions of 0.5, the data was stored in tyxzc order.
        # We have no way of inspecting the data to determine this, so we allow
        #  users to specify that their ilp is very old using the
        #  assume_old_ilp_axes command-line flag
        ilastik.utility.globals.ImportOptions.default_axis_order = 'tyxzc'

    # Load project (auto-import it if necessary)
    logger.info("Opening project: '" + args.project + "'")
    shell.openProjectFile(args.project)

    try:
        if not args.generate_project_predictions and len(
                args.batch_inputs) == 0:
            logger.error(
                "Command-line arguments didn't specify any classification jobs."
            )
        else:
            # Predictions for project input datasets
            if args.generate_project_predictions:
                generateProjectPredictions(shell)

            # Predictions for other datasets ('batch datasets')
            result = True
            if len(args.batch_inputs) > 0:
                result = generateBatchPredictions(
                    shell.workflow, args.batch_inputs, args.batch_export_dir,
                    args.batch_output_suffix, args.batch_output_dataset_name,
                    args.stack_volume_cache_dir)
                assert result
    finally:
        logger.info("Closing project...")
        shell.closeCurrentProject()

    logger.info("FINISHED.")
Code Example #11
A minimal script: create a blank PixelClassificationWorkflow project, fill in the Project Metadata applet, and save.
import functools

from ilastik.shell.headless.headlessShell import HeadlessShell
from ilastik.workflows.pixelClassification.pixelClassificationWorkflow import PixelClassificationWorkflow

if __name__ == "__main__":
    shell = HeadlessShell( functools.partial(PixelClassificationWorkflow, appendBatchOperators=False) )
    shell.createBlankProjectFile("/tmp/testproject.ilp")
    workflow = shell.workflow 
    
    d = {a.name : a for a in workflow.applets}
    
    d["Project Metadata"]._projectMetadata.projectName = "Test project"
    d["Project Metadata"]._projectMetadata.labeler     = "Mr. Labeler"
    d["Project Metadata"]._projectMetadata.description = "Automatically generated"
    
    shell.projectManager.saveProject()
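
The dict comprehension d = {a.name : a for a in workflow.applets} keys each applet by its display name, which is what lets the script fetch the 'Project Metadata' applet without knowing its position in workflow.applets.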
Code Example #12
A PEP 8-wrapped variant of example #8.
def runWorkflow(parsed_args):
    args = parsed_args

    # Read the config file
    configFilePath = args.option_config_file
    config = parseClusterConfigFile(configFilePath)

    # If we've got a process name, re-initialize the logger from scratch
    task_name = "node"
    if args.process_name is not None:
        task_name = args.process_name
        ilastik.ilastik_logging.default_config.init(args.process_name + ' ')

    rootLogHandler = None
    if args._node_work_ is None:
        # This is the master process.
        # Tee the log to a file for future reference.

        # Output log directory might be a relative path (relative to config file)
        absLogDir, _ = getPathVariants(config.output_log_directory,
                                       os.path.split(configFilePath)[0])
        if not os.path.exists(absLogDir):
            os.mkdir(absLogDir)

        # Copy the config we're using to the output directory
        shutil.copy(configFilePath, absLogDir)

        logFile = os.path.join(absLogDir, "MASTER.log")
        logFileFormatter = logging.Formatter(
            "%(levelname)s %(name)s: %(message)s")
        rootLogHandler = logging.FileHandler(logFile, 'a')
        rootLogHandler.setFormatter(logFileFormatter)
        rootLogger = logging.getLogger()
        rootLogger.addHandler(rootLogHandler)
        logger.info("Launched with sys.argv: {}".format(sys.argv))

    # Update the monkey_patch settings
    ilastik.monkey_patches.apply_setting_dict(config.__dict__)

    # If we're running a node job, set the threadpool size if the user specified one.
    # Note that the main thread does not count toward the threadpool total.
    if args._node_work_ is not None and config.task_threadpool_size is not None:
        lazyflow.request.Request.reset_thread_pool(
            num_workers=config.task_threadpool_size)

    # Make sure project file exists.
    if not os.path.exists(args.project):
        raise RuntimeError("Project file '" + args.project +
                           "' does not exist.")

    # Instantiate 'shell'
    shell = HeadlessShell(
        functools.partial(Workflow.getSubclass(config.workflow_type)))

    # Load project (auto-import it if necessary)
    logger.info("Opening project: '" + args.project + "'")
    shell.openProjectPath(args.project)

    workflow = shell.projectManager.workflow

    # Attach cluster operators
    resultSlot = None
    finalOutputSlot = workflow.getHeadlessOutputSlot(config.output_slot_id)
    assert finalOutputSlot is not None

    secondaryOutputSlots = workflow.getSecondaryHeadlessOutputSlots(
        config.output_slot_id)
    secondaryOutputDescriptions = args.secondary_output_description_file  # This is a list (see 'action' above)
    if len(secondaryOutputDescriptions) != len(secondaryOutputSlots):
        raise RuntimeError(
            "This workflow produces exactly {} SECONDARY outputs.  You provided {}."
            .format(len(secondaryOutputSlots),
                    len(secondaryOutputDescriptions)))

    clusterOperator = None
    try:
        if args._node_work_ is not None:
            # We're doing node work
            opClusterTaskWorker = OperatorWrapper(
                OpTaskWorker, parent=finalOutputSlot.getRealOperator().parent)

            # FIXME: Image index is hard-coded as 0.  We assume we are working with only one (big) dataset in cluster mode.
            opClusterTaskWorker.Input.connect(finalOutputSlot)
            opClusterTaskWorker.RoiString[0].setValue(args._node_work_)
            opClusterTaskWorker.TaskName.setValue(task_name)
            opClusterTaskWorker.ConfigFilePath.setValue(
                args.option_config_file)

            # Configure optional slots first for efficiency (avoid multiple calls to setupOutputs)
            opClusterTaskWorker.SecondaryInputs[0].resize(
                len(secondaryOutputSlots))
            opClusterTaskWorker.SecondaryOutputDescriptions[0].resize(
                len(secondaryOutputSlots))
            for i in range(len(secondaryOutputSlots)):
                opClusterTaskWorker.SecondaryInputs[0][i].connect(
                    secondaryOutputSlots[i][0])
                opClusterTaskWorker.SecondaryOutputDescriptions[0][i].setValue(
                    secondaryOutputDescriptions[i])

            opClusterTaskWorker.OutputFilesetDescription.setValue(
                args.output_description_file)

            # If we have a way to report task progress (e.g. by updating the job name),
            #  then subscribe to progress signals
            if config.task_progress_update_command is not None:

                def report_progress(progress):
                    cmd = config.task_progress_update_command.format(
                        progress=int(progress))

                    def shell_call(shell_cmd):
                        logger.debug("Executing progress command: " + cmd)
                        subprocess.call(shell_cmd, shell=True)

                    background_tasks.put(functools.partial(shell_call, cmd))

                opClusterTaskWorker.innerOperators[0].progressSignal.subscribe(
                    report_progress)

            resultSlot = opClusterTaskWorker.ReturnCode
            clusterOperator = opClusterTaskWorker
        else:
            # We're the master
            opClusterizeMaster = OperatorWrapper(
                OpClusterize, parent=finalOutputSlot.getRealOperator().parent)

            opClusterizeMaster.Input.connect(finalOutputSlot)
            opClusterizeMaster.ProjectFilePath.setValue(args.project)
            opClusterizeMaster.OutputDatasetDescription.setValue(
                args.output_description_file)

            # Configure optional slots first for efficiency (avoid multiple calls to setupOutputs)
            opClusterizeMaster.SecondaryInputs[0].resize(
                len(secondaryOutputSlots))
            opClusterizeMaster.SecondaryOutputDescriptions[0].resize(
                len(secondaryOutputSlots))
            for i in range(len(secondaryOutputSlots)):
                opClusterizeMaster.SecondaryInputs[0][i].connect(
                    secondaryOutputSlots[i][0])
                opClusterizeMaster.SecondaryOutputDescriptions[0][i].setValue(
                    secondaryOutputDescriptions[i])

            opClusterizeMaster.ConfigFilePath.setValue(args.option_config_file)

            resultSlot = opClusterizeMaster.ReturnCode
            clusterOperator = opClusterizeMaster

        # Get the result
        logger.info("Starting task")
        result = resultSlot[
            0].value  # FIXME: The image index is hard-coded here.
    finally:
        logger.info("Cleaning up")
        global stop_background_tasks
        stop_background_tasks = True

        try:
            if clusterOperator is not None:
                clusterOperator.cleanUp()
        except:
            logger.error("Errors during cleanup.")

        try:
            logger.info("Closing project...")
            shell.closeCurrentProject()
        except:
            logger.error("Errors while closing project.")

    logger.info("FINISHED with result {}".format(result))
    if not result:
        logger.error("FAILED TO COMPLETE!")

    if rootLogHandler is not None:
        rootLogHandler.close()
Code Example #13
The same script as example #11, reformatted.
import functools

from ilastik.shell.headless.headlessShell import HeadlessShell
from ilastik.workflows.pixelClassification.pixelClassificationWorkflow import PixelClassificationWorkflow

if __name__ == "__main__":
    shell = HeadlessShell(
        functools.partial(PixelClassificationWorkflow,
                          appendBatchOperators=False))
    shell.createBlankProjectFile("/tmp/testproject.ilp")
    workflow = shell.workflow

    d = {a.name: a for a in workflow.applets}

    d["Project Metadata"]._projectMetadata.projectName = "Test project"
    d["Project Metadata"]._projectMetadata.labeler = "Mr. Labeler"
    d["Project Metadata"]._projectMetadata.description = "Automatically generated"

    shell.projectManager.saveProject()
Code Example #14
File: ilastik.py  Project: christophdecker/ilastik
An excerpt from module-level launch code; it begins inside an if/elif chain that installs exception hooks, then launches headless or GUI.
    sys.excepthook = print_exc_and_exit
    install_thread_excepthook()
# Show uncaught exceptions to the user (default behavior)
elif not ilastik_config.getboolean('ilastik', 'debug') and not parsed_args.headless:
    old_excepthook = sys.excepthook
    def exception_dialog(*exc_info):
        old_excepthook(*exc_info)
        try:
            from ilastik.shell.gui.startShellGui import shell
            shell.postErrorMessage( exc_info[0].__name__, str(exc_info[1]) )
        except:
            pass
    sys.excepthook = exception_dialog
    install_thread_excepthook()

if ilastik_config.getboolean("ilastik", "debug"):
    logger.info("Starting ilastik in debug mode.")

# Headless launch
if parsed_args.headless:
    from ilastik.shell.headless.headlessShell import HeadlessShell
    shell = HeadlessShell( workflow_cmdline_args )
    for f in init_funcs:
        f(shell)
# Normal launch
else:
    from ilastik.shell.gui.startShellGui import startShellGui
    sys.exit(startShellGui(workflow_cmdline_args, parsed_args.start_recording, *init_funcs))