Code Example #1
File: ilastik.py Project: fdiego/ilastik
def main():
    if "--clean_paths" in sys.argv:
        this_path = os.path.dirname(__file__)
        ilastik_dir = os.path.abspath(os.path.join(this_path, "..%s.." % os.path.sep))
        _clean_paths(ilastik_dir)

    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
    
    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    # DEBUG EXAMPLES
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.headless = True
    #os.environ["LAZYFLOW_THREADS"] = "0"

    #parsed_args.headless = True
    #parsed_args.new_project = '/tmp/emptyproj.ilp'
    #parsed_args.workflow = "Pixel Classification"

    ilastik_main.main(parsed_args, workflow_cmdline_args)
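The commented DEBUG EXAMPLES above map one-to-one onto ordinary command-line flags. As a minimal sketch (the flags match those used elsewhere on this page; the project path is a placeholder), the same headless start-up can be driven by populating sys.argv before parsing:

import sys

import ilastik_main

# Simulate a command-line launch that opens an existing project headlessly.
# The .ilp path below is a placeholder.
sys.argv = ["ilastik.py", "--headless", "--project=/path/to/MyProject.ilp"]
parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
ilastik_main.main(parsed_args, workflow_cmdline_args)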
Code Example #2
File: ilastik.py Project: dagophil/autocontext
def main():
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
    
    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    # Unpack the file listing the datasets that should be predicted
    from argparse import ArgumentParser
    import os
    predict_file_parser = ArgumentParser()
    predict_file_parser.add_argument("--predict_file", type=str, required=False)
    predict_file, workflow_cmdline_args = predict_file_parser.parse_known_args(workflow_cmdline_args)
    predict_file = predict_file.predict_file
    if predict_file is not None:
        file_dir, _ = os.path.split(predict_file)
        with open(predict_file, "r") as f:
            for line in f:
                line = line.strip()
                if line:
                    workflow_cmdline_args.append(os.path.join(file_dir, line))
                
    # DEBUG EXAMPLES
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.headless = True

    ilastik_main.main(parsed_args, workflow_cmdline_args)
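For context, the --predict_file option above names a plain-text file with one dataset path per line; each non-empty line is resolved relative to the listing file's own directory. A minimal sketch of that resolution logic, using hypothetical file names:

import os

# Hypothetical listing file with one dataset per line.
listing_path = "/data/predict_list.txt"
with open(listing_path, "w") as f:
    f.write("volume1.h5\nvolume2.h5\n")

# Mirror the loop above: resolve each line against the listing's directory.
listing_dir, _ = os.path.split(listing_path)
with open(listing_path, "r") as f:
    resolved = [os.path.join(listing_dir, line.strip())
                for line in f if line.strip()]
assert resolved == ["/data/volume1.h5", "/data/volume2.h5"]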
Code Example #3
File: ilastik.py Project: cjaques/ilastik
def main():
    if "--clean_paths" in sys.argv:
        this_path = os.path.dirname(__file__)
        ilastik_dir = os.path.abspath(os.path.join(this_path, "..%s.." % os.path.sep))
        _clean_paths(ilastik_dir)

    import ilastik_main

    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()

    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and workflow_cmdline_args[0].endswith(".ilp") and parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    # DEBUG EXAMPLES
    # parsed_args.project='/Users/bergs/MyProject.ilp'
    # parsed_args.headless = True
    # Single-threaded mode
    os.environ["LAZYFLOW_THREADS"] = "0"  # set before loading the project so it takes effect

    # parsed_args.headless = True
    # parsed_args.new_project = '/tmp/emptyproj.ilp'
    # parsed_args.workflow = "Pixel Classification"
    ilastik_main.main(parsed_args, workflow_cmdline_args)
    print(parsed_args)
Code Example #4
File: ilastik.py Project: stuarteberg/ilastik
def main():
    if "--clean_paths" in sys.argv:
        this_path = os.path.dirname(__file__)
        ilastik_dir = os.path.abspath(
            os.path.join(this_path, "..%s.." % os.path.sep))
        _clean_paths(ilastik_dir)

    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()

    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    # DEBUG EXAMPLES
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.headless = True
    #os.environ["LAZYFLOW_THREADS"] = "0"

    #parsed_args.headless = True
    #parsed_args.new_project = '/tmp/emptyproj.ilp'
    #parsed_args.workflow = "Pixel Classification"

    ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #5
File: ilastik.py Project: kumartr/ilastik
def main():
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()

    # DEBUG EXAMPLES
    # parsed_args.project='/Users/bergs/MyProject.ilp'
    # parsed_args.headless = True

    ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #6
File: ilastik.py Project: varunotelli/ilastik
def main():
    if "--clean_paths" in sys.argv:
        this_path = os.path.dirname(__file__)
        ilastik_dir = os.path.abspath(
            os.path.join(this_path, "..%s.." % os.path.sep))
        _clean_paths(ilastik_dir)

    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()

    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    # DEVELOPERS:
    # Provide your command-line args here. See examples below.

    ## Auto-open an existing project
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.project='/magnetic/data/multicut-testdata/2d/MyMulticut2D.ilp'
    #parsed_args.project = '/Users/bergs/MyMulticutProject.ilp'
    #parsed_args.project = '/magnetic/data/multicut-testdata/chris-256/MyMulticutProject-chris256.ilp'
    #parsed_args.project = '/magnetic/data/flyem/fib25-neuroproof-validation/fib25-multicut/mc-training/mc-training-with-corrected-gt.ilp'

    ## Headless-mode options
    #parsed_args.headless = True
    #parsed_args.debug = True

    ## Override lazyflow environment settings
    #os.environ["LAZYFLOW_THREADS"] = "0"
    #os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "8192"

    ## Provide workflow-specific args
    #workflow_cmdline_args += ["--retrain"]

    ## Provide batch inputs (for headless mode)
    #workflow_cmdline_args += ["/magnetic/data/cells/001cell.png",
    #                          "/magnetic/data/cells/002cell.png",
    #                          "/magnetic/data/cells/003cell.png" ]

    # Create a new project from scratch (instead of opening existing project)
    #parsed_args.new_project='/Users/bergs/MyProject.ilp'
    #parsed_args.workflow = 'Pixel Classification'
    #parsed_args.workflow = 'Object Classification (from pixel classification)'
    #parsed_args.workflow = 'Carving'

    ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #7
File: ilastik.py Project: DerThorsten/ilastik
def main():
    if "--clean_paths" in sys.argv:
        this_path = os.path.dirname(__file__)
        ilastik_dir = os.path.abspath(os.path.join(this_path, "..%s.." % os.path.sep))
        _clean_paths(ilastik_dir)

    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
    
    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    # DEVELOPERS:
    # Provide your command-line args here. See examples below.
    
    ## Auto-open an existing project
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.project='/magnetic/data/multicut-testdata/2d/MyMulticut2D.ilp'
    #parsed_args.project = '/Users/bergs/MyMulticutProject.ilp'
    #parsed_args.project = '/magnetic/data/multicut-testdata/chris-256/MyMulticutProject-chris256.ilp'
    #parsed_args.project = '/magnetic/data/flyem/fib25-neuroproof-validation/fib25-multicut/mc-training/mc-training-with-corrected-gt.ilp'

    ## Headless-mode options
    #parsed_args.headless = True
    #parsed_args.debug = True

    ## Override lazyflow environment settings
    #os.environ["LAZYFLOW_THREADS"] = "0"
    #os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "8192"

    ## Provide workflow-specific args
    #workflow_cmdline_args += ["--retrain"]

    ## Provide batch inputs (for headless mode)
    #workflow_cmdline_args += ["/magnetic/data/cells/001cell.png",
    #                          "/magnetic/data/cells/002cell.png",
    #                          "/magnetic/data/cells/003cell.png" ]

    # Create a new project from scratch (instead of opening existing project)
    #parsed_args.new_project='/Users/bergs/MyProject.ilp'
    #parsed_args.workflow = 'Pixel Classification'
    #parsed_args.workflow = 'Object Classification (from pixel classification)'
    #parsed_args.workflow = 'Carving'

    ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #8
File: ilastik.py Project: ilastikdev/ilastik
def main():
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
    
    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    # DEBUG EXAMPLES
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.headless = True

    ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #9
    def testUsingPreloadedArraysWhenScriptingBatchProcessing(self):
        args = ilastik_main.parse_args([])
        args.headless = True
        args.project = self.PROJECT_FILE
        shell = ilastik_main.main(args)
        assert isinstance(shell.workflow, PixelClassificationWorkflow)

        # Obtain the training operator
        opPixelClassification = shell.workflow.pcApplet.topLevelOperator

        # Sanity checks
        assert len(opPixelClassification.InputImages) > 0
        assert opPixelClassification.Classifier.ready()

        input_data1 = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(
            numpy.uint8)
        input_data2 = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(
            numpy.uint8)

        role_data_dict = {
            "Raw Data": [
                PreloadedArrayDatasetInfo(preloaded_array=input_data1,
                                          axistags=vigra.AxisTags("tzyxc")),
                PreloadedArrayDatasetInfo(preloaded_array=input_data2,
                                          axistags=vigra.AxisTags("tzyxc")),
            ]
        }

        predictions = shell.workflow.batchProcessingApplet.run_export(
            role_data_dict, export_to_array=True)
        for result in predictions:
            assert result.shape == (2, 20, 20, 5, 2)
Code Example #10
File: testAllHeadless.py Project: vibbits/ilastik
    def start_workflow_create_project_headless(self, workflow_class_tuple,
                                               temp_dir):
        """Tests project file creation via the command line
        Args:
            workflow_class_tuple (tuple): tuple returned from getAvailableWorkflows
              with (workflow_class, workflow_name, workflow_class.workflowDisplayName)
        """
        workflow_class, workflow_name, display_name = workflow_class_tuple
        logger.debug(f"starting {workflow_name}")
        project_file = generate_project_file_name(temp_dir, workflow_name)

        args = [
            "--headless", f"--new_project={project_file}",
            f"--workflow={workflow_name}"
        ]
        # Clear the existing commandline args so it looks like we're starting fresh.
        sys.argv = ["ilastik.py"]
        sys.argv.extend(args)

        # Start up the ilastik.py entry script as if we had launched it from the command line
        parsed_args, workflow_cmdline_args = ilastik_main.parse_known_args()

        shell = ilastik_main.main(parsed_args=parsed_args,
                                  workflow_cmdline_args=workflow_cmdline_args,
                                  init_logging=False)

        shell.closeCurrentProject()

        # now check if the project file has been created:
        assert os.path.exists(
            project_file
        ), f"Project File {project_file} creation not successful"
Code Example #11
def create_test_files():
    tags = vigra.defaultAxistags("zyxc")
    tags['x'].resolution = 1.0
    tags['y'].resolution = 1.0
    tags['z'].resolution = 45.0
    tags['c'].description = 'intensity'
    with h5py.File(test_data_path, 'w') as f:
        f['zeros'] = numpy.zeros((10, 100, 200, 1), dtype=numpy.uint8)
        f['zeros'].attrs['axistags'] = tags.toJSON()

    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
    parsed_args.new_project = test_project_path
    parsed_args.workflow = "Pixel Classification"
    parsed_args.headless = True

    shell = ilastik_main.main(parsed_args, workflow_cmdline_args)
    data_selection_applet = shell.workflow.dataSelectionApplet

    # To configure data selection, start with empty cmdline args and manually fill them in
    data_selection_args, _ = data_selection_applet.parse_known_cmdline_args([])
    data_selection_args.raw_data = [test_data_path + '/zeros']

    # Configure the data selection operator from the parsed args
    data_selection_applet.configure_operator_with_parsed_args(data_selection_args)

    shell.projectManager.saveProject()
    return data_selection_applet
Code Example #12
    def start_workflow_load_project_headless(self, workflow_class_tuple, temp_dir):
        """Tests opening project files in headless mode via the command line
        Args:
            workflow_class_tuple (tuple): tuple returned from getAvailableWorkflows
              with (workflow_class, workflow_name, workflow_class.workflowDisplayName)
        """
        workflow_class, workflow_name, display_name = workflow_class_tuple
        logger.debug(f'starting {workflow_name}')
        project_file = generate_project_file_name(temp_dir, workflow_name)

        self.create_project_file(workflow_class, project_file)
        assert os.path.exists(project_file), f"Project File {project_file} creation not successful"

        args = [
            '--headless',
            f'--project={project_file}',
        ]
        # Clear the existing commandline args so it looks like we're starting fresh.
        sys.argv = ['ilastik.py']
        sys.argv.extend(args)

        # Start up the ilastik.py entry script as if we had launched it from the command line
        parsed_args, workflow_cmdline_args = ilastik_main.parse_known_args()
        shell = ilastik_main.main(
            parsed_args=parsed_args, workflow_cmdline_args=workflow_cmdline_args, init_logging=False)

        shell.closeCurrentProject()
Code Example #13
def open_test_files():
    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
    parsed_args.project = test_project_path
    parsed_args.headless = True

    shell = ilastik_main.main(parsed_args, workflow_cmdline_args)
    return shell.workflow.dataSelectionApplet
Code Example #14
    def __call__(self):
        if not self.headless:
            print("Only headless mode is allowed in these tests")
            raise NotImplementedError()

        if self.project is None:
            print("Missing project path")
            raise NotImplementedError()

        parsed_args = Namespace(
            clean_paths=self.clean_paths,
            configfile=self.configfile,
            debug=self.debug,
            exit_on_failure=self.exit_on_failure,
            exit_on_success=self.exit_on_success,
            fullscreen=self.fullscreen,
            headless=self.headless,
            logfile=self.logfile,
            new_project=self.new_project,
            playback_script=self.playback_script,
            playback_speed=self.playback_speed,
            process_name=self.process_name,
            project=self.project,
            readonly=self.readonly,
            redirect_output=self.redirect_output,
            start_recording=self.start_recording,
            workflow=self.workflow,
        )

        workflow_cmdline_args = self.data_files

        ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #15
def _open_project(project_path, init_logging=False):
    """
    Open a project file and return the HeadlessShell instance.
    """
    parsed_args = ilastik_main.parser.parse_args([])
    parsed_args.headless = True
    parsed_args.project = project_path
    # parsed_args.readonly = True
    parsed_args.readonly = ILP_READONLY
    parsed_args.debug = True  # possibly delete this?

    if ILP_RETRAIN:
        shell = ilastik_main.main(parsed_args,
                                  workflow_cmdline_args=['--retrain'],
                                  init_logging=init_logging)
    else:
        shell = ilastik_main.main(parsed_args, init_logging=init_logging)
    return shell
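A minimal usage sketch for _open_project, assuming the module-level ILP_READONLY and ILP_RETRAIN settings referenced above and a placeholder project path:

# Hypothetical usage; the project path is a placeholder.
shell = _open_project("/path/to/MyProject.ilp", init_logging=True)

# For a Pixel Classification project, the training operator is then
# reachable as in the other examples on this page.
opPixelClassification = shell.workflow.pcApplet.topLevelOperator

shell.closeCurrentProject()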
Code Example #16
def main():
    if "--clean_paths" in sys.argv:
        this_path = os.path.dirname(__file__)
        ilastik_dir = os.path.abspath(
            os.path.join(this_path, "..%s.." % os.path.sep))
        _clean_paths(ilastik_dir)

    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()

    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    from argparse import ArgumentParser
    predict_file_parser = ArgumentParser()
    predict_file_parser.add_argument("--predict_file",
                                     type=str,
                                     required=False)
    predict_file, workflow_cmdline_args = predict_file_parser.parse_known_args(
        workflow_cmdline_args)
    predict_file = predict_file.predict_file
    if predict_file is not None:
        file_dir, _ = os.path.split(predict_file)
        with open(predict_file, "r") as f:
            for line in f:
                line = line.strip()
                if line:
                    workflow_cmdline_args.append(os.path.join(file_dir, line))

    # DEBUG EXAMPLES
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.headless = True
    #os.environ["LAZYFLOW_THREADS"] = "0"

    #parsed_args.headless = True
    #parsed_args.new_project = '/tmp/emptyproj.ilp'
    #parsed_args.workflow = "Pixel Classification"

    ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #17
def open_project(project_path):
    """
    Open a project file and return the HeadlessShell instance.
    """
    parsed_args = ilastik_main.parser.parse_args([])
    parsed_args.headless = True
    parsed_args.project = project_path

    shell = ilastik_main.main(parsed_args)
    return shell
Code Example #18
File: ilastik.py Project: dagophil/autocontext
def main():
    if "--clean_paths" in sys.argv:
        this_path = os.path.dirname(__file__)
        ilastik_dir = os.path.abspath(os.path.join(this_path, "..%s.." % os.path.sep))
        _clean_paths(ilastik_dir)

    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
    
    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    from argparse import ArgumentParser
    predict_file_parser = ArgumentParser()
    predict_file_parser.add_argument("--predict_file", type=str, required=False)
    predict_file, workflow_cmdline_args = predict_file_parser.parse_known_args(workflow_cmdline_args)
    predict_file = predict_file.predict_file
    if predict_file is not None:
        file_dir, _ = os.path.split(predict_file)
        with open(predict_file, "r") as f:
            for line in f:
                line = line.strip()
                if line:
                    workflow_cmdline_args.append(os.path.join(file_dir, line))

    # DEBUG EXAMPLES
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.headless = True
    #os.environ["LAZYFLOW_THREADS"] = "0"

    #parsed_args.headless = True
    #parsed_args.new_project = '/tmp/emptyproj.ilp'
    #parsed_args.workflow = "Pixel Classification"

    ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #19
def test_133_pc_oc_loading(project_path: pathlib.Path):
    args = ["--headless", f"--project={project_path}"]
    # Clear the existing commandline args so it looks like we're starting fresh.
    sys.argv = ["ilastik.py"]
    sys.argv.extend(args)

    # Start up the ilastik.py entry script as if we had launched it from the command line
    parsed_args, workflow_cmdline_args = ilastik_main.parse_known_args()

    shell = ilastik_main.main(parsed_args=parsed_args,
                              workflow_cmdline_args=workflow_cmdline_args,
                              init_logging=False)

    shell.closeCurrentProject()
Code Example #20
File: ilastik.py Project: ukoethe/ilastik
def main():
    if "--clean_paths" in sys.argv:
        this_path = os.path.dirname(__file__)
        ilastik_dir = os.path.abspath(os.path.join(this_path, "..%s.." % os.path.sep))
        _clean_paths(ilastik_dir)

    import ilastik_main
    # Special command-line control over default tmp dir
    import ilastik.monkey_patches
    ilastik.monkey_patches.extend_arg_parser(ilastik_main.parser)
    parsed_args, workflow_cmdline_args = ilastik_main.parser.parse_known_args()
    
    # Allow start-up by double-clicking an '.ilp' file.
    if len(workflow_cmdline_args) == 1 and \
       workflow_cmdline_args[0].endswith('.ilp') and \
       parsed_args.project is None:
        parsed_args.project = workflow_cmdline_args[0]
        workflow_cmdline_args = []

    # DEBUG EXAMPLES
    #parsed_args.project='/Users/bergs/MyProject.ilp'
    #parsed_args.headless = True

    ilastik_main.main(parsed_args, workflow_cmdline_args)
Code Example #21
    def _classify_with_ilastik(self, image):

        args = ilastik_main.parser.parse_args([])
        args.headless = True
        args.project = os.path.join(
            self.h5_directory.get_absolute_path(),
            self.classifier_file_name.value).encode("utf-8")

        input_data = image
        print(input_data.shape)
        input_data = vigra.taggedView(input_data, 'yxc')

        shell = ilastik_main.main(args)
        assert isinstance(shell.workflow, PixelClassificationWorkflow)

        # The training operator
        opPixelClassification = shell.workflow.pcApplet.topLevelOperator

        # Sanity checks
        assert len(opPixelClassification.InputImages) > 0
        assert opPixelClassification.Classifier.ready()


        label_names = opPixelClassification.LabelNames.value
        label_colors = opPixelClassification.LabelColors.value
        probability_colors = opPixelClassification.PmapColors.value

        print(label_names, label_colors, probability_colors)

        # Change the connections of the batch prediction pipeline so we can supply our own data.
        opBatchFeatures = shell.workflow.opBatchFeatures
        opBatchPredictionPipeline = shell.workflow.opBatchPredictionPipeline

        opBatchFeatures.InputImage.disconnect()
        opBatchFeatures.InputImage.resize(1)
        opBatchFeatures.InputImage[0].setValue(input_data)

        # Run prediction.
        assert len(opBatchPredictionPipeline.HeadlessPredictionProbabilities) == 1
        assert opBatchPredictionPipeline.HeadlessPredictionProbabilities[0].ready()
        predictions = opBatchPredictionPipeline.HeadlessPredictionProbabilities[0][:].wait()
        return predictions
Code Example #22
File: ilastik.py Project: ilastik/ilastik
def main():
    if '--clean_paths' in sys.argv:
        script_dir = pathlib.Path(__file__).parent
        ilastik_root = script_dir.parent.parent
        _clean_paths(ilastik_root)

    # Allow start-up by double-clicking a project file.
    if len(sys.argv) == 2 and sys.argv[1].endswith('.ilp'):
        sys.argv.insert(1, '--project')

    arg_opts, env_vars = _parse_internal_config("internal-startup-options.cfg")
    sys.argv[1:1] = arg_opts
    os.environ.update(env_vars)

    import ilastik_main
    parsed_args, workflow_cmdline_args = ilastik_main.parse_known_args()

    hShell = ilastik_main.main(parsed_args, workflow_cmdline_args)
    # in headless mode the headless shell is returned and its project manager still has an open project file
    hShell.closeCurrentProject()
Code Example #23
File: algorithms.py Project: neurodata/ndparse
def run_ilastik_pixel(input_data, classifier, threads=2, ram=2000):

    """
    Runs a pre-trained ilastik classifier on a volume of data
    Adapted from Stuart Berg's example here:
    https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py

    Arguments:
        input_data:  RAMONVolume containing a numpy array or raw numpy array

    Returns:
        predictions: The probability maps for the classified pixels
    """

    from collections import OrderedDict
    import numpy as np
    import vigra
    import os
    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo
    from ilastik.workflows.pixelClassification \
        import PixelClassificationWorkflow

    # Before we start ilastik, prepare these environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(threads)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)

    # Set the command-line arguments directly into argparse.Namespace object
    # Provide your project file, and don't forget to specify headless.
    args = ilastik_main.parser.parse_args([])
    args.headless = True
    args.project = classifier

    # Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    # Obtain the training operator
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator

    # Sanity checks
    assert len(opPixelClassification.InputImages) > 0
    assert opPixelClassification.Classifier.ready()

    # Report the shape of the input volume before batch processing.
    print(input_data.shape)

    # Tagging the data ensures that ilastik interprets the axes correctly
    # (here the volume is interpreted as 3D, axes 'xyz').
    input_data = vigra.taggedView(input_data, 'xyz')

    # In case you're curious about which label class is which,
    # let's read the label names from the project file.
    label_names = opPixelClassification.LabelNames.value
    label_colors = opPixelClassification.LabelColors.value
    probability_colors = opPixelClassification.PmapColors.value

    print(label_names, label_colors, probability_colors)

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    role_data_dict = OrderedDict([("Raw Data",
                                   [DatasetInfo(preloaded_array=input_data)])])

    # Run the export via the BatchProcessingApplet
    # Note: If you don't provide export_to_array, then the results will
    #       be exported to disk according to project's DataExport settings.
    #       In that case, run_export() returns None.
    predictions = shell.workflow.batchProcessingApplet.\
        run_export(role_data_dict, export_to_array=True)
    predictions = np.squeeze(predictions)
    print(predictions.dtype, predictions.shape)

    print("DONE.")

    return predictions
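A hedged usage sketch for run_ilastik_pixel: the function tags its input as a 3D 'xyz' volume, so a plain 3D array works; the classifier path below is a placeholder.

import numpy as np

# Small random volume standing in for real data; placeholder project path.
input_data = np.random.randint(0, 255, (100, 100, 10)).astype(np.uint8)
predictions = run_ilastik_pixel(input_data, "/path/to/MyClassifier.ilp",
                                threads=2, ram=2000)
print(predictions.shape)  # roughly (100, 100, 10, n_labels) after squeezing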
Code Example #24
def classify_pixel(input_data, classifier, threads=8, ram=4000):
    """
    Runs a pre-trained ilastik classifier on a volume of data
    Adapted from Stuart Berg's example here:
    https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py
    Arguments:
        input_data: data to be classified - 3D numpy array
        classifier: ilastik trained/classified file
        threads: number of thread to use for classifying input data
        ram: RAM to use in MB
    Returns:
        pixel_out: The probability maps for the classified pixels
    """

    import numpy as np
    from collections import OrderedDict
    import vigra
    import os
    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow

    # Before we start ilastik, prepare these environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(threads)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)

    # Set the command-line arguments directly into argparse.Namespace object
    # Provide your project file, and don't forget to specify headless.
    args = ilastik_main.parser.parse_args([])
    args.headless = True
    args.project = classifier

    # Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    # Obtain the training operator
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator

    # Sanity checks
    assert len(opPixelClassification.InputImages) > 0
    assert opPixelClassification.Classifier.ready()

    # Report the shape of the input volume before batch processing.
    print("input_data.shape", input_data.shape)

    # Tagging the data ensures that ilastik interprets the axes correctly
    # (here the volume is interpreted as 3D, axes 'xyz').
    input_data = vigra.taggedView(input_data, 'xyz')

    # In case you're curious about which label class is which,
    # let's read the label names from the project file.
    label_names = opPixelClassification.LabelNames.value
    label_colors = opPixelClassification.LabelColors.value
    probability_colors = opPixelClassification.PmapColors.value

    print("label_names, label_colors, probability_colors", label_names,
          label_colors, probability_colors)

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    role_data_dict = OrderedDict([("Raw Data",
                                   [DatasetInfo(preloaded_array=input_data)])])

    # Run the export via the BatchProcessingApplet
    # Note: If you don't provide export_to_array, then the results will
    #       be exported to disk according to project's DataExport settings.
    #       In that case, run_export() returns None.

    predictions = shell.workflow.batchProcessingApplet.\
        run_export(role_data_dict, export_to_array=True)
    predictions = np.squeeze(predictions)
    print("predictions.dtype, predictions.shape", predictions.dtype,
          predictions.shape)

    print("DONE.")

    return predictions
Code Example #25
def generate_untrained_project_file(
    new_project_path, raw_data_paths, feature_selections, label_names
):
    """
    Create a new project file from scratch, add the given raw data files,
    inject the corresponding labels, configure the given feature selections,
    and (if provided) override the classifier type ('factory').
    Finally, request the classifier object from the pipeline (which forces training),
    and save the project.
    new_project_path: Where to save the new project file
    raw_data_paths: A list of paths to the raw data images to train with
    label_data_paths: A list of paths to the label image data to train with
    feature_selections: A matrix of bool, representing the selected features
    labels: list of label names

    """
    import ilastik_main as app
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    from ilastik.applets.dataSelection.opDataSelection import RelativeFilesystemDatasetInfo
    ##
    ## CREATE PROJECT
    ##

    # Manually configure the arguments to ilastik, as if they were parsed from the command line.
    # (Start with empty args and fill in below.)
    ilastik_args = app.parse_args([])
    ilastik_args.new_project = new_project_path
    ilastik_args.headless = True
    ilastik_args.workflow = "Pixel Classification"

    shell = app.main(ilastik_args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    ##
    ## CONFIGURE FILE PATHS
    ##

    data_selection_applet = shell.workflow.dataSelectionApplet
    input_infos = [RelativeFilesystemDatasetInfo(filePath=path)
                   for path in raw_data_paths]

    opDataSelection = data_selection_applet.topLevelOperator

    existing_lanes = len(opDataSelection.DatasetGroup)
    opDataSelection.DatasetGroup.resize(max(len(input_infos), existing_lanes))
    # Not sure if assuming role_index = 0 is always valid
    role_index = 0
    for lane_index, info in enumerate(input_infos):
        if info:
            opDataSelection.DatasetGroup[lane_index][role_index].setValue(info)

    ##
    ## APPLY FEATURE MATRIX (from matrix above)
    ##

    opFeatures = shell.workflow.featureSelectionApplet.topLevelOperator
    opFeatures.Scales.setValue(ScalesList)
    opFeatures.FeatureIds.setValue(FeatureIds)
    opFeatures.SelectionMatrix.setValue(feature_selections)

    ##
    ## CUSTOMIZE CLASSIFIER TYPE
    ##

    opPixelClassification = shell.workflow.pcApplet.topLevelOperator

    ##
    ## READ/APPLY LABEL VOLUMES
    ##

    opPixelClassification.LabelNames.setValue(label_names)

    # Save the project file.
    shell.projectManager.saveProject(force_all_save=False)
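generate_untrained_project_file references module-level ScalesList and FeatureIds that are not shown in this snippet. A sketch of plausible definitions (the values mirror ilastik's standard pixel-classification feature set, but treat them as assumptions) plus a hypothetical call:

import numpy

# Assumed module-level constants (not shown in the snippet above).
ScalesList = [0.3, 0.7, 1.0, 1.6, 3.5, 5.0, 10.0]
FeatureIds = ['GaussianSmoothing',
              'LaplacianOfGaussian',
              'GaussianGradientMagnitude',
              'DifferenceOfGaussians',
              'StructureTensorEigenvalues',
              'HessianOfGaussianEigenvalues']

# Select every feature at every scale: a len(FeatureIds) x len(ScalesList) bool matrix.
feature_selections = numpy.ones((len(FeatureIds), len(ScalesList)), dtype=bool)

# Hypothetical paths and label names.
generate_untrained_project_file("/tmp/untrained.ilp",
                                ["raw1.png", "raw2.png"],
                                feature_selections,
                                ["Membrane", "Background"])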
Code Example #26
def run_ilastik_stage(stage_num,
                      ilp_path,
                      input_vol,
                      mask,
                      output_path,
                      LAZYFLOW_THREADS=1,
                      LAZYFLOW_TOTAL_RAM_MB=None,
                      logfile="/dev/null",
                      extra_cmdline_args=[]):
    import os
    from collections import OrderedDict

    import uuid
    import multiprocessing
    import platform
    import psutil
    import vigra

    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo

    if LAZYFLOW_TOTAL_RAM_MB is None:
        # By default, assume our allotted RAM is proportional
        # to the CPUs we've been told to use
        machine_ram = psutil.virtual_memory().total
        machine_ram -= 1024**3  # Leave 1 GB RAM for the OS.

        LAZYFLOW_TOTAL_RAM_MB = LAZYFLOW_THREADS * machine_ram // multiprocessing.cpu_count()

    # Before we start ilastik, prepare the environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(LAZYFLOW_THREADS)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(LAZYFLOW_TOTAL_RAM_MB)
    os.environ["LAZYFLOW_STATUS_MONITOR_SECONDS"] = "10"

    # Prepare ilastik's "command-line" arguments, as if they were already parsed.
    args, extra_workflow_cmdline_args = ilastik_main.parser.parse_known_args(
        extra_cmdline_args)
    args.headless = True
    args.debug = True  # ilastik's 'debug' flag enables special power features, including experimental workflows.
    args.project = ilp_path
    args.readonly = True

    # The process_name argument is prefixed to all log messages.
    # For now, just use the machine name and a uuid
    # FIXME: It would be nice to provide something more descriptive, like the ROI of the current spark job...
    args.process_name = platform.node() + "-" + str(
        uuid.uuid1()) + "-" + str(stage_num)

    # To avoid conflicts between processes, give each process its own logfile to write to.
    if logfile != "/dev/null":
        base, ext = os.path.splitext(logfile)
        logfile = base + '.' + args.process_name + ext

    # By default, all ilastik processes duplicate their console output to ~/.ilastik_log.txt
    # Obviously, having all spark nodes write to a common file is a bad idea.
    # The "/dev/null" setting here is recognized by ilastik and means "Don't write a log file"
    args.logfile = logfile

    # Instantiate the 'shell', (in this case, an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args, extra_workflow_cmdline_args)

    ## Need to find a better way to verify the workflow type
    #from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    #assert isinstance(shell.workflow, PixelClassificationWorkflow)

    opInteractiveExport = shell.workflow.batchProcessingApplet.dataExportApplet.topLevelOperator.getLane(0)
    opInteractiveExport.OutputFilenameFormat.setValue(output_path)
    opInteractiveExport.OutputInternalPath.setValue('predictions')
    opInteractiveExport.OutputFormat.setValue('hdf5')

    selected_result = opInteractiveExport.InputSelection.value
    num_channels = opInteractiveExport.Inputs[selected_result].meta.shape[-1]

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    if isinstance(input_vol, str):
        role_data_dict = OrderedDict([("Raw Data",
                                       [DatasetInfo(filepath=input_vol)])])
    else:
        # If given raw data, we assume it's grayscale, zyx order (stage 1)
        raw_data_array = vigra.taggedView(input_vol, 'zyx')
        role_data_dict = OrderedDict([
            ("Raw Data", [DatasetInfo(preloaded_array=raw_data_array)])
        ])

    if mask is not None:
        # If there's a mask, we might be able to save some computation time.
        mask = vigra.taggedView(mask, 'zyx')
        role_data_dict["Prediction Mask"] = [DatasetInfo(preloaded_array=mask)]

    # Run the export via the BatchProcessingApplet
    export_paths = shell.workflow.batchProcessingApplet.run_export(
        role_data_dict, export_to_array=False)
    assert len(export_paths) == 1
    assert export_paths[0] == output_path + '/predictions', \
        "Output path was {}".format(export_paths[0])
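A hedged invocation sketch for run_ilastik_stage; the paths are placeholders and the random volume stands in for real grayscale data (stage 1 expects zyx order):

import numpy as np

grayscale = np.random.randint(0, 255, (32, 256, 256)).astype(np.uint8)  # zyx

# Placeholder paths; predictions land in /tmp/stage1.h5/predictions.
run_ilastik_stage(1, "/path/to/stage1.ilp", grayscale, None,
                  "/tmp/stage1.h5",
                  LAZYFLOW_THREADS=4, LAZYFLOW_TOTAL_RAM_MB=4096)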
Code Example #27
def classify_pixel_hdf(hdf_data_set_name, classifier, threads, ram):
    
    """
    Interface function to Ilastik object classifier functions.function
    
    Runs a pre-trained ilastik classifier on a volume of data given in an hdf5 file
    Adapted from Stuart Berg's example here:
    https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py
    
    Parameters:
        hdf_data_set_name: dataset to be classified - 3D numpy array
        classifier: ilastik trained/classified file
        threads: number of thread to use for classifying input data
        ram: RAM to use in MB
    
    Returns:
        pixel_out: The probability maps for the classified pixels
    """
    
    # Before we start ilastik, prepare these environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(threads)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)
    
    # Set the command-line arguments directly into argparse.Namespace object
    # Provide your project file, and don't forget to specify headless.
    args = ilastik_main.parser.parse_args([])
    args.headless = True
    args.project = classifier
    
    # Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)
    
    # Obtain the training operator
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    
    # Sanity checks
    assert len(opPixelClassification.InputImages) > 0
    assert opPixelClassification.Classifier.ready()
    
    # In case you're curious about which label class is which,
    # let's read the label names from the project file.
    label_names = opPixelClassification.LabelNames.value
    label_colors = opPixelClassification.LabelColors.value
    probability_colors = opPixelClassification.PmapColors.value
    
    print("label_names, label_colors, probability_colors", label_names, label_colors, probability_colors)
    
    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    data_info = DatasetInfo(hdf_data_set_name)
    # Interpret the volume with axes z, y, x (slices, rows, columns).
    data_info.axistags = vigra.defaultAxistags('zyx'.encode('ascii'))
    role_data_dict = OrderedDict([("Raw Data", [data_info])])
    # Run the export via the BatchProcessingApplet
    # Note: If you don't provide export_to_array, then the results will
    #       be exported to disk according to project's DataExport settings.
    #       In that case, run_export() returns None.
    
    hdf_dataset_path = shell.workflow.batchProcessingApplet.\
        run_export(role_data_dict, export_to_array=False)
    
    print("DONE WITH CLASSIFICATION.")
    
    return hdf_dataset_path
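Note on the hdf_data_set_name argument: ilastik addresses an HDF5 dataset as '<file path>/<internal dataset path>'. A hypothetical call therefore looks like this:

# 'raw' is the internal dataset inside volume.h5; paths are placeholders.
hdf_dataset_path = classify_pixel_hdf("/data/volume.h5/raw",
                                      "/data/MyClassifier.ilp",
                                      threads=4, ram=4000)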
Code Example #28
def ilastik_multicut(grayscale,
                     boundary_volume,
                     supervoxels,
                     ilp_path,
                     LAZYFLOW_THREADS=1,
                     LAZYFLOW_TOTAL_RAM_MB=None,
                     logfile="/dev/null",
                     extra_cmdline_args=[]):
    print('status=multicut')
    print("Starting ilastik_multicut() ...")
    print("grayscale volume: dtype={}, shape={}".format(str(grayscale.dtype), grayscale.shape))
    print("boundary volume: dtype={}, shape={}".format(str(boundary_volume.dtype), boundary_volume.shape))
    print("supervoxels volume: dtype={}, shape={}".format(str(supervoxels.dtype), supervoxels.shape))

    import os
    from collections import OrderedDict

    import uuid
    import multiprocessing
    import platform
    import psutil
    import vigra

    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo

    print("ilastik_multicut(): Done with imports")

    if LAZYFLOW_TOTAL_RAM_MB is None:
        # By default, assume our allotted RAM is proportional
        # to the CPUs we've been told to use
        machine_ram = psutil.virtual_memory().total
        machine_ram -= 1024**3  # Leave 1 GB RAM for the OS.

        LAZYFLOW_TOTAL_RAM_MB = LAZYFLOW_THREADS * machine_ram // multiprocessing.cpu_count()

    # Before we start ilastik, prepare the environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(LAZYFLOW_THREADS)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(LAZYFLOW_TOTAL_RAM_MB)
    os.environ["LAZYFLOW_STATUS_MONITOR_SECONDS"] = "10"

    extra_cmdline_args += ['--output_axis_order=zyx']
    # Prepare ilastik's "command-line" arguments, as if they were already parsed.
    args, extra_workflow_cmdline_args = ilastik_main.parser.parse_known_args(extra_cmdline_args)
    args.headless = True
    args.debug = True # ilastik's 'debug' flag enables special power features, including experimental workflows.
    args.project = str(ilp_path)
    args.readonly = True

    # The process_name argument is prefixed to all log messages.
    # For now, just use the machine name and a uuid
    # FIXME: It would be nice to provide something more descriptive, like the ROI of the current spark job...
    args.process_name = platform.node() + "-" + str(uuid.uuid1())

    # To avoid conflicts between processes, give each process its own logfile to write to.
    if logfile != "/dev/null":
        base, ext = os.path.splitext(logfile)
        logfile = base + '.' + args.process_name + ext

    # By default, all ilastik processes duplicate their console output to ~/.ilastik_log.txt
    # Obviously, having all spark nodes write to a common file is a bad idea.
    # The "/dev/null" setting here is recognized by ilastik and means "Don't write a log file"
    args.logfile = logfile

    print("ilastik_multicut(): Creating shell...")

    # Instantiate the 'shell', (in this case, an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args, extra_workflow_cmdline_args)

    ## Need to find a better way to verify the workflow type
    #from ilastik.workflows.multicutWorkflow import MulticutWorkflow
    #assert isinstance(shell.workflow, MulticutWorkflow)

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See MulticutWorkflow.ROLE_NAMES)
    raw_data_array = vigra.taggedView(grayscale, 'zyx')
    probabilities_array = vigra.taggedView(boundary_volume, 'zyxc')
    superpixels_array = vigra.taggedView(supervoxels, 'zyx')
    
    role_data_dict = OrderedDict([ ("Raw Data", [ DatasetInfo(preloaded_array=raw_data_array) ]),
                                   ("Probabilities", [ DatasetInfo(preloaded_array=probabilities_array) ]),
                                   ("Superpixels", [ DatasetInfo(preloaded_array=superpixels_array) ]) ])

    print("ilastik_multicut(): Starting export...")

    # Run the export via the BatchProcessingApplet
    segmentation_list = shell.workflow.batchProcessingApplet.run_export(role_data_dict, export_to_array=True)
    assert len(segmentation_list) == 1
    segmentation = segmentation_list[0]

    assert segmentation.ndim == 3
    print('status=multicut finished')
    return segmentation
Code Example #29
import os

import numpy

import ilastik_main
from ilastik.applets.dataSelection import DatasetInfo
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow

# Before we start ilastik, optionally prepare these environment variable settings.
os.environ["LAZYFLOW_THREADS"] = "2"
os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"

# Programmatically set the command-line arguments directly into the argparse.Namespace object
# Provide your project file, and don't forget to specify headless.
args = ilastik_main.parser.parse_args([])
args.headless = True
args.project = '/Users/bergs/MyProject.ilp'  # REPLACE WITH YOUR PROJECT FILE

# Instantiate the 'shell', (in this case, an instance of ilastik.shell.HeadlessShell)
# This also loads the project file into shell.projectManager
shell = ilastik_main.main(args)
assert isinstance(shell.workflow, PixelClassificationWorkflow)

# Obtain the training operator
opPixelClassification = shell.workflow.pcApplet.topLevelOperator

# Sanity checks
assert len(opPixelClassification.InputImages) > 0
assert opPixelClassification.Classifier.ready()

# For this example, we'll use random input data to "batch process"
input_data1 = numpy.random.randint(0, 255, (200, 200, 1)).astype(numpy.uint8)
input_data2 = numpy.random.randint(0, 255, (300, 300, 1)).astype(numpy.uint8)
print(input_data1.shape)

# In this example, we're using 2D data (with an extra dimension for channel).
Code Example #30
def ilastik_predict_with_array(gray_vol,
                               mask,
                               ilp_path,
                               selected_channels=None,
                               normalize=True,
                               LAZYFLOW_THREADS=1,
                               LAZYFLOW_TOTAL_RAM_MB=None,
                               logfile="/dev/null",
                               extra_cmdline_args=[]):
    """
    Using ilastik's python API, open the given project 
    file and run a prediction on the given raw data array.
    
    Other than the project file, nothing is read or written 
    using the hard disk.
    
    gray_vol: A 3D numpy array with axes zyx

    mask: A binary image where 0 means "no prediction necessary".
         'None' can be given, which means "predict everything".

    ilp_path: Path to the project file.  ilastik also accepts a url to a DVID key-value, which will be downloaded and opened as an ilp
    
    selected_channels: A list of channel indexes to select and return from the prediction results.
                       'None' can also be given, which means "return all prediction channels".
                       You may also return a *nested* list, in which case groups of channels can be
                       combined (summed) into their respective output channels.
                       For example: selected_channels=[0,3,[2,4],7] means the output will have 4 channels:
                                    0,3,2+4,7 (channels 5 and 6 are simply dropped).
    
    normalize: Renormalize all outputs so the channels sum to 1 everywhere.
               That is, (predictions.sum(axis=-1) == 1.0).all()
               Note: Pixels with 0.0 in all channels will be simply given a value of 1/N in all channels.
    
    LAZYFLOW_THREADS, LAZYFLOW_TOTAL_RAM_MB: Passed to ilastik via environment variables.
    """
    print("ilastik_predict_with_array(): Starting with raw data: dtype={}, shape={}".format(
        str(gray_vol.dtype), gray_vol.shape))

    import os
    from collections import OrderedDict

    import uuid
    import multiprocessing
    import platform
    import psutil
    import vigra

    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo
    from lazyflow.operators.cacheMemoryManager import CacheMemoryManager

    import logging
    logging.getLogger(__name__).info('status=ilastik prediction')
    print("ilastik_predict_with_array(): Done with imports")

    if LAZYFLOW_TOTAL_RAM_MB is None:
        # By default, assume our allotted RAM is proportional
        # to the CPUs we've been told to use
        machine_ram = psutil.virtual_memory().total
        machine_ram -= 1024**3  # Leave 1 GB RAM for the OS.
        machine_ram_mb = machine_ram // 1024**2  # Convert bytes to MB
        LAZYFLOW_TOTAL_RAM_MB = LAZYFLOW_THREADS * machine_ram_mb // multiprocessing.cpu_count()

    # Before we start ilastik, prepare the environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(LAZYFLOW_THREADS)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(LAZYFLOW_TOTAL_RAM_MB)
    os.environ["LAZYFLOW_STATUS_MONITOR_SECONDS"] = "10"

    # Prepare ilastik's "command-line" arguments, as if they were already parsed.
    args, extra_workflow_cmdline_args = ilastik_main.parser.parse_known_args(
        extra_cmdline_args)
    args.headless = True
    args.debug = True  # ilastik's 'debug' flag enables special power features, including experimental workflows.
    args.project = str(ilp_path)
    args.readonly = True

    # The process_name argument is prefixed to all log messages.
    # For now, just use the machine name and a uuid
    # FIXME: It would be nice to provide something more descriptive, like the ROI of the current spark job...
    args.process_name = platform.node() + "-" + str(uuid.uuid1())

    # To avoid conflicts between processes, give each process its own logfile to write to.
    if logfile != "/dev/null":
        base, ext = os.path.splitext(logfile)
        logfile = base + '.' + args.process_name + ext

    # By default, all ilastik processes duplicate their console output to ~/.ilastik_log.txt
    # Obviously, having all spark nodes write to a common file is a bad idea.
    # The "/dev/null" setting here is recognized by ilastik and means "Don't write a log file"
    args.logfile = logfile

    print("ilastik_predict_with_array(): Creating shell...")

    # Instantiate the 'shell', (in this case, an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args, extra_workflow_cmdline_args)

    ## Need to find a better way to verify the workflow type
    #from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    #assert isinstance(shell.workflow, PixelClassificationWorkflow)

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    raw_data_array = vigra.taggedView(gray_vol, 'zyx')
    role_data_dict = OrderedDict([
        ("Raw Data", [DatasetInfo(preloaded_array=raw_data_array)])
    ])

    if mask is not None:
        # If there's a mask, we might be able to save some computation time.
        mask = vigra.taggedView(mask, 'zyx')
        role_data_dict["Prediction Mask"] = [DatasetInfo(preloaded_array=mask)]

    print("ilastik_predict_with_array(): Starting export...")

    # Sanity checks
    opInteractiveExport = shell.workflow.batchProcessingApplet.dataExportApplet.topLevelOperator.getLane(0)
    selected_result = opInteractiveExport.InputSelection.value
    num_channels = opInteractiveExport.Inputs[selected_result].meta.shape[-1]

    # For convenience, verify the selected channels before we run the export.
    if selected_channels:
        assert isinstance(selected_channels, list)
        for selection in selected_channels:
            if isinstance(selection, list):
                assert all(c < num_channels for c in selection), \
                    "Selected channels ({}) exceed number of prediction classes ({})"\
                    .format( selected_channels, num_channels )
            else:
                assert selection < num_channels, \
                    "Selected channels ({}) exceed number of prediction classes ({})"\
                    .format( selected_channels, num_channels )

    # Run the export via the BatchProcessingApplet
    prediction_list = shell.workflow.batchProcessingApplet.run_export(
        role_data_dict, export_to_array=True)
    assert len(prediction_list) == 1
    predictions = prediction_list[0]

    assert predictions.shape[-1] == num_channels
    selected_predictions = select_channels(predictions, selected_channels)

    if normalize:
        normalize_channels_in_place(selected_predictions)

    # Cleanup: kill cache monitor thread
    CacheMemoryManager().stop()
    CacheMemoryManager.instance = None

    # Cleanup environment
    del os.environ["LAZYFLOW_THREADS"]
    del os.environ["LAZYFLOW_TOTAL_RAM_MB"]
    del os.environ["LAZYFLOW_STATUS_MONITOR_SECONDS"]

    logging.getLogger(__name__).info('status=ilastik prediction finished')
    return selected_predictions
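Note that select_channels() and normalize_channels_in_place() are not shown in this snippet. A minimal sketch consistent with the docstring semantics above (nested selections are summed into one output channel; normalization makes the channels sum to 1.0, with all-zero pixels set to 1/N) might look like:

import numpy as np

def select_channels(predictions, selected_channels):
    # Sketch: pick/combine prediction channels as described in the docstring.
    if selected_channels is None:
        return predictions
    channels = []
    for selection in selected_channels:
        if isinstance(selection, list):
            # Sum a group of channels into a single output channel.
            channels.append(predictions[..., selection].sum(axis=-1))
        else:
            channels.append(predictions[..., selection])
    return np.stack(channels, axis=-1)

def normalize_channels_in_place(predictions):
    # Sketch: renormalize so the channels sum to 1.0 everywhere.
    totals = predictions.sum(axis=-1)
    zero_mask = (totals == 0.0)
    # Pixels with 0.0 in all channels get 1/N in every channel.
    predictions[zero_mask] = 1.0 / predictions.shape[-1]
    totals[zero_mask] = 1.0
    predictions[:] = predictions / totals[..., None]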
Code example #36
def runWorkflow(cluster_args):
    ilastik_main_args = ilastik_main.parser.parse_args([])
    # Copy relevant args from cluster cmdline options to ilastik_main cmdline options
    ilastik_main_args.headless = True
    ilastik_main_args.project = cluster_args.project
    ilastik_main_args.process_name = cluster_args.process_name

    # Nodes should not write to a common logfile.
    # Override with /dev/null
    if cluster_args._node_work_ is None:
        ilastik_main_args.logfile = cluster_args.logfile
    else:
        ilastik_main_args.logfile = "/dev/null"

    assert cluster_args.project is not None, "Didn't get a project file."

    # Read the config file
    configFilePath = cluster_args.option_config_file
    config = parseClusterConfigFile(configFilePath)

    # Update the monkey_patch settings
    ilastik.monkey_patches.apply_setting_dict(config.__dict__)

    # Configure the thread count.
    # Nowadays, this is done via an environment variable setting for ilastik_main to detect.
    if cluster_args._node_work_ is not None and config.task_threadpool_size is not None:
        os.environ["LAZYFLOW_THREADS"] = str(config.task_threadpool_size)

    if cluster_args._node_work_ is not None and config.task_total_ram_mb is not None:
        os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(config.task_total_ram_mb)

    # Instantiate 'shell' by calling ilastik_main with our parsed args
    shell = ilastik_main.main(ilastik_main_args)
    workflow = shell.projectManager.workflow

    # Attach cluster operators
    resultSlot = None
    finalOutputSlot = workflow.getHeadlessOutputSlot(config.output_slot_id)
    assert finalOutputSlot is not None

    clusterOperator = None
    try:
        if cluster_args._node_work_ is not None:
            clusterOperator, resultSlot = prepare_node_cluster_operator(
                config, cluster_args, finalOutputSlot)
        else:
            clusterOperator, resultSlot = prepare_master_cluster_operator(
                cluster_args, finalOutputSlot)

        # Get the result
        logger.info("Starting task")
        result = resultSlot[0].value  # FIXME: The image index is hard-coded here.
    finally:
        logger.info("Cleaning up")
        global stop_background_tasks
        stop_background_tasks = True

        try:
            if clusterOperator is not None:
                clusterOperator.cleanUp()
        except Exception:
            logger.error("Errors during cleanup.")

        try:
            logger.info("Closing project...")
            shell.closeCurrentProject()
        except Exception:
            logger.error("Errors while closing project.")

    logger.info("FINISHED with result {}".format(result))
    if not result:
        logger.error("FAILED TO COMPLETE!")
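For reference, a hedged sketch of invoking runWorkflow(): the attribute names below are exactly those read by the function above, while the values (and the config file) are hypothetical; parseClusterConfigFile() and the cluster operators are defined elsewhere in the original script.

from argparse import Namespace

cluster_args = Namespace(
    project='/path/to/MyProject.ilp',
    process_name='cluster-task-0',
    logfile='/tmp/cluster-task-0.log',
    _node_work_=None,  # None means "run as the master process"
    option_config_file='/path/to/cluster_config.json',
)
runWorkflow(cluster_args)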
Code example #37
import numpy
import vigra

import ilastik_main
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow

args = ilastik_main.parser.parse_args([])
args.headless = True
args.project = '/Users/bergs/MyProject.ilp'

shell = ilastik_main.main( args )
assert isinstance(shell.workflow, PixelClassificationWorkflow)

# The training operator
opPixelClassification = shell.workflow.pcApplet.topLevelOperator

# Sanity checks
assert len(opPixelClassification.InputImages) > 0
assert opPixelClassification.Classifier.ready()

# For this example, we'll use random input data to "batch process"
input_data = numpy.random.randint(0, 255, (200, 200, 1)).astype(numpy.uint8)
print(input_data.shape)
input_data = vigra.taggedView(input_data, 'yxc')

label_names = opPixelClassification.LabelNames.value
label_colors = opPixelClassification.LabelColors.value
probability_colors = opPixelClassification.PmapColors.value

print(label_names, label_colors, probability_colors)
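A hedged continuation of this example: feed the tagged array through batch processing and relate the output channels to the label names just printed (the run_export() usage mirrors the other examples in this collection).

from collections import OrderedDict
from ilastik.applets.dataSelection import DatasetInfo

role_data_dict = OrderedDict([
    ("Raw Data", [DatasetInfo(preloaded_array=input_data)])
])
(predictions,) = shell.workflow.batchProcessingApplet.run_export(
    role_data_dict, export_to_array=True)

# One prediction channel per label class.
assert predictions.shape[-1] == len(label_names)
for channel, name in enumerate(label_names):
    print(name, predictions[..., channel].mean())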
Code example #38
def ilastik_predict_with_array(gray_vol, mask, ilp_path, selected_channels=None, normalize=True, 
                               LAZYFLOW_THREADS=1, LAZYFLOW_TOTAL_RAM_MB=None, logfile="/dev/null", extra_cmdline_args=[]):
    """
    Using ilastik's python API, open the given project 
    file and run a prediction on the given raw data array.
    
    Other than the project file, nothing is read or written 
    using the hard disk.
    
    gray_vol: A 3D numpy array with axes zyx

    mask: A binary image where 0 means "no prediction necessary".
         'None' can be given, which means "predict everything".

    ilp_path: Path to the project file.  ilastik also accepts a url to a DVID key-value, which will be downloaded and opened as an ilp
    
    selected_channels: A list of channel indexes to select and return from the prediction results.
                       'None' can also be given, which means "return all prediction channels".
                       You may also return a *nested* list, in which case groups of channels can be
                       combined (summed) into their respective output channels.
                       For example: selected_channels=[0,3,[2,4],7] means the output will have 4 channels:
                                    0,3,2+4,7 (channels 5 and 6 are simply dropped).
    
    normalize: Renormalize all outputs so the channels sum to 1 everywhere.
               That is, (predictions.sum(axis=-1) == 1.0).all()
               Note: Pixels with 0.0 in all channels will be simply given a value of 1/N in all channels.
    
    LAZYFLOW_THREADS, LAZYFLOW_TOTAL_RAM_MB: Passed to ilastik via environment variables.
    """
    print("ilastik_predict_with_array(): Starting with raw data: dtype={}, shape={}".format(str(gray_vol.dtype), gray_vol.shape))

    import os
    from collections import OrderedDict

    import uuid
    import multiprocessing
    import platform
    import psutil
    import vigra

    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo

    print("ilastik_predict_with_array(): Done with imports")

    if LAZYFLOW_TOTAL_RAM_MB is None:
        # By default, assume our allotted RAM is proportional
        # to the CPUs we've been told to use
        machine_ram = psutil.virtual_memory().total
        machine_ram -= 1024**3  # Leave 1 GB RAM for the OS.
        machine_ram_mb = machine_ram // 1024**2  # Convert bytes to MB
        LAZYFLOW_TOTAL_RAM_MB = LAZYFLOW_THREADS * machine_ram_mb // multiprocessing.cpu_count()

    # Before we start ilastik, prepare the environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(LAZYFLOW_THREADS)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(LAZYFLOW_TOTAL_RAM_MB)
    os.environ["LAZYFLOW_STATUS_MONITOR_SECONDS"] = "10"

    # Prepare ilastik's "command-line" arguments, as if they were already parsed.
    args, extra_workflow_cmdline_args = ilastik_main.parser.parse_known_args(extra_cmdline_args)
    args.headless = True
    args.debug = True # ilastik's 'debug' flag enables special power features, including experimental workflows.
    args.project = ilp_path
    args.readonly = True

    # By default, all ilastik processes duplicate their console output to ~/.ilastik_log.txt
    # Obviously, having all spark nodes write to a common file is a bad idea.
    # The "/dev/null" setting here is recognized by ilastik and means "Don't write a log file"
    args.logfile = logfile

    # The process_name argument is prefixed to all log messages.
    # For now, just use the machine name and a uuid
    # FIXME: It would be nice to provide something more descriptive, like the ROI of the current spark job...
    args.process_name = platform.node() + "-" + str(uuid.uuid1())

    print("ilastik_predict_with_array(): Creating shell...")

    # Instantiate the 'shell', (in this case, an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main( args, extra_workflow_cmdline_args )

    ## Need to find a better way to verify the workflow type
    #from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    #assert isinstance(shell.workflow, PixelClassificationWorkflow)

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    raw_data_array = vigra.taggedView(gray_vol, 'zyx')
    role_data_dict = OrderedDict([ ("Raw Data", [ DatasetInfo(preloaded_array=raw_data_array) ]) ])
    
    if mask is not None:
        # If there's a mask, we might be able to save some computation time.
        mask = vigra.taggedView(mask, 'zyx')
        role_data_dict["Prediction Mask"] = [ DatasetInfo(preloaded_array=mask) ]

    print("ilastik_predict_with_array(): Starting export...")

    # Sanity checks
    opInteractiveExport = shell.workflow.batchProcessingApplet.dataExportApplet.topLevelOperator.getLane(0)
    selected_result = opInteractiveExport.InputSelection.value
    num_channels = opInteractiveExport.Inputs[selected_result].meta.shape[-1]
    
    # For convenience, verify the selected channels before we run the export.
    if selected_channels:
        assert isinstance(selected_channels, list)
        for selection in selected_channels:
            if isinstance(selection, list):
                assert all(c < num_channels for c in selection), \
                    "Selected channels ({}) exceed number of prediction classes ({})"\
                    .format( selected_channels, num_channels )
            else:
                assert selection < num_channels, \
                    "Selected channels ({}) exceed number of prediction classes ({})"\
                    .format( selected_channels, num_channels )
                

    # Run the export via the BatchProcessingApplet
    prediction_list = shell.workflow.batchProcessingApplet.run_export(role_data_dict, export_to_array=True)
    assert len(prediction_list) == 1
    predictions = prediction_list[0]

    assert predictions.shape[-1] == num_channels
    selected_predictions = select_channels(predictions, selected_channels)

    if normalize:
        normalize_channels_in_place(selected_predictions)
    
    return selected_predictions
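A hedged usage sketch for ilastik_predict_with_array(); the project path is hypothetical and the input is random data:

import numpy as np

gray = np.random.randint(0, 255, (64, 256, 256)).astype(np.uint8)  # zyx

probs = ilastik_predict_with_array(
    gray, mask=None,
    ilp_path='/path/to/MyProject.ilp',  # hypothetical
    selected_channels=[0, [1, 2]],      # channel 0, plus channels 1+2 summed
    normalize=True,
    LAZYFLOW_THREADS=4, LAZYFLOW_TOTAL_RAM_MB=4000)
print(probs.shape)  # expected: (64, 256, 256, 2)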
Code example #39
File: train_headless.py Project: ilastik/ilastik
def generate_trained_project_file(
    new_project_path, raw_data_paths, label_data_paths, feature_selections, classifier_factory=None
):
    """
    Create a new project file from scratch, add the given raw data files,
    inject the corresponding labels, configure the given feature selections,
    and (if provided) override the classifier type ('factory').
    
    Finally, request the classifier object from the pipeline (which forces training),
    and save the project.
    
    new_project_path: Where to save the new project file
    raw_data_paths: A list of paths to the raw data images to train with
    label_data_paths: A list of paths to the label image data to train with
    feature_selections: A matrix of bool, representing the selected features
    classifier_factory: Override the classifier type.  Must be a subclass of either:
                        - lazyflow.classifiers.LazyflowVectorwiseClassifierFactoryABC
                        - lazyflow.classifiers.LazyflowPixelwiseClassifierFactoryABC
    """
    assert len(raw_data_paths) == len(label_data_paths), "Number of label images must match number of raw images."

    import ilastik_main
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    from lazyflow.graph import Graph
    from lazyflow.operators.ioOperators import OpInputDataReader
    from lazyflow.roi import roiToSlice, roiFromShape

    ##
    ## CREATE PROJECT
    ##

    # Manually configure the arguments to ilastik, as if they were parsed from the command line.
    # (Start with empty args and fill in below.)
    ilastik_args = ilastik_main.parser.parse_args([])
    ilastik_args.new_project = new_project_path
    ilastik_args.headless = True
    ilastik_args.workflow = "Pixel Classification"

    shell = ilastik_main.main(ilastik_args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    ##
    ## CONFIGURE GRAYSCALE INPUT
    ##

    data_selection_applet = shell.workflow.dataSelectionApplet

    # To configure data selection, start with empty cmdline args and manually fill them in
    data_selection_args, _ = data_selection_applet.parse_known_cmdline_args([], PixelClassificationWorkflow.ROLE_NAMES)
    data_selection_args.raw_data = raw_data_paths
    data_selection_args.preconvert_stacks = True

    # Simplest thing here is to configure using cmd-line interface
    data_selection_applet.configure_operator_with_parsed_args(data_selection_args)

    ##
    ## APPLY FEATURE MATRIX (from matrix above)
    ##

    opFeatures = shell.workflow.featureSelectionApplet.topLevelOperator
    opFeatures.Scales.setValue(ScalesList)
    opFeatures.FeatureIds.setValue(FeatureIds)
    opFeatures.SelectionMatrix.setValue(feature_selections)

    ##
    ## CUSTOMIZE CLASSIFIER TYPE
    ##

    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    if classifier_factory is not None:
        opPixelClassification.ClassifierFactory.setValue(classifier_factory)

    ##
    ## READ/APPLY LABEL VOLUMES
    ##

    # Read each label volume and inject the label data into the appropriate training slot
    cwd = os.getcwd()
    max_label_class = 0
    for lane, label_data_path in enumerate(label_data_paths):
        graph = Graph()
        opReader = OpInputDataReader(graph=graph)
        try:
            opReader.WorkingDirectory.setValue(cwd)
            opReader.FilePath.setValue(label_data_path)

            print("Reading label volume: {}".format(label_data_path))
            label_volume = opReader.Output[:].wait()
        finally:
            opReader.cleanUp()

        raw_shape = opPixelClassification.InputImages[lane].meta.shape
        if label_volume.ndim != len(raw_shape):
            # Append a singleton channel axis
            assert label_volume.ndim == len(raw_shape) - 1
            label_volume = label_volume[..., None]

        # Auto-calculate the max label value
        max_label_class = max(max_label_class, label_volume.max())

        print("Applying label volume to lane #{}".format(lane))
        entire_volume_slicing = roiToSlice(*roiFromShape(label_volume.shape))
        opPixelClassification.LabelInputs[lane][entire_volume_slicing] = label_volume

    assert max_label_class > 1, "Not enough label classes were found in your label data."
    label_names = list(map(str, range(max_label_class)))
    opPixelClassification.LabelNames.setValue(label_names)

    ##
    ## TRAIN CLASSIFIER
    ##

    # Make sure the caches in the pipeline are not 'frozen'.
    # (This is the equivalent of 'live update' mode in the GUI.)
    opPixelClassification.FreezePredictions.setValue(False)

    # Request the classifier object from the pipeline.
    # This forces the pipeline to produce (train) the classifier.
    _ = opPixelClassification.Classifier.value

    ##
    ## SAVE PROJECT
    ##

    # save project file (includes the new classifier).
    shell.projectManager.saveProject(force_all_save=False)
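generate_trained_project_file() reads two module-level constants, ScalesList and FeatureIds, that are defined elsewhere in train_headless.py. A sketch of plausible values (assumed to mirror ilastik's standard pixel-feature setup), plus the shape the feature_selections matrix must have:

import numpy

# Assumed module-level constants, mirroring ilastik's standard feature setup.
ScalesList = [0.3, 0.7, 1.0, 1.6, 3.5, 5.0, 10.0]
FeatureIds = ['GaussianSmoothing',
              'LaplacianOfGaussian',
              'GaussianGradientMagnitude',
              'DifferenceOfGaussians',
              'StructureTensorEigenvalues',
              'HessianOfGaussianEigenvalues']

# One row per feature, one column per scale.
feature_selections = numpy.zeros((len(FeatureIds), len(ScalesList)), dtype=bool)
feature_selections[0, 0] = True  # e.g. Gaussian Smoothing at sigma=0.3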
Code example #40
def run_ilastik_stage(stage_num, ilp_path, input_vol, mask, output_path,
                      LAZYFLOW_THREADS=1, LAZYFLOW_TOTAL_RAM_MB=None, logfile="/dev/null", extra_cmdline_args=[]):
    import os
    from collections import OrderedDict

    import uuid
    import multiprocessing
    import platform
    import psutil
    import vigra

    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo

    if LAZYFLOW_TOTAL_RAM_MB is None:
        # By default, assume our allotted RAM is proportional
        # to the CPUs we've been told to use
        machine_ram = psutil.virtual_memory().total
        machine_ram -= 1024**3  # Leave 1 GB RAM for the OS.
        machine_ram_mb = machine_ram // 1024**2  # Convert bytes to MB
        LAZYFLOW_TOTAL_RAM_MB = LAZYFLOW_THREADS * machine_ram_mb // multiprocessing.cpu_count()

    # Before we start ilastik, prepare the environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(LAZYFLOW_THREADS)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(LAZYFLOW_TOTAL_RAM_MB)
    os.environ["LAZYFLOW_STATUS_MONITOR_SECONDS"] = "10"

    # Prepare ilastik's "command-line" arguments, as if they were already parsed.
    args, extra_workflow_cmdline_args = ilastik_main.parser.parse_known_args(extra_cmdline_args)
    args.headless = True
    args.debug = True # ilastik's 'debug' flag enables special power features, including experimental workflows.
    args.project = ilp_path
    args.readonly = True

    # The process_name argument is prefixed to all log messages.
    # For now, just use the machine name and a uuid
    # FIXME: It would be nice to provide something more descriptive, like the ROI of the current spark job...
    args.process_name = platform.node() + "-" + str(uuid.uuid1()) + "-" + str(stage_num)

    # To avoid conflicts between processes, give each process its own logfile to write to.
    if logfile != "/dev/null":
        base, ext = os.path.splitext(logfile)
        logfile = base + '.' + args.process_name + ext

    # By default, all ilastik processes duplicate their console output to ~/.ilastik_log.txt
    # Obviously, having all spark nodes write to a common file is a bad idea.
    # The "/dev/null" setting here is recognized by ilastik and means "Don't write a log file"
    args.logfile = logfile

    # Instantiate the 'shell', (in this case, an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main( args, extra_workflow_cmdline_args )

    ## Need to find a better way to verify the workflow type
    #from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    #assert isinstance(shell.workflow, PixelClassificationWorkflow)

    opInteractiveExport = shell.workflow.batchProcessingApplet.dataExportApplet.topLevelOperator.getLane(0)
    opInteractiveExport.OutputFilenameFormat.setValue(output_path)
    opInteractiveExport.OutputInternalPath.setValue('predictions')
    opInteractiveExport.OutputFormat.setValue('hdf5')
    
    selected_result = opInteractiveExport.InputSelection.value
    num_channels = opInteractiveExport.Inputs[selected_result].meta.shape[-1]

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    if isinstance(input_vol, str):
        role_data_dict = OrderedDict([ ("Raw Data", [ DatasetInfo(filepath=input_vol) ]) ])
    else:
        # If given raw data, we assume it's grayscale, zyx order (stage 1)
        raw_data_array = vigra.taggedView(input_vol, 'zyx')
        role_data_dict = OrderedDict([ ("Raw Data", [ DatasetInfo(preloaded_array=raw_data_array) ]) ])
    
    if mask is not None:
        # If there's a mask, we might be able to save some computation time.
        mask = vigra.taggedView(mask, 'zyx')
        role_data_dict["Prediction Mask"] = [ DatasetInfo(preloaded_array=mask) ]

    # Run the export via the BatchProcessingApplet
    export_paths = shell.workflow.batchProcessingApplet.run_export(role_data_dict, export_to_array=False)
    assert len(export_paths) == 1
    assert export_paths[0] == output_path + '/predictions', "Output path was {}".format(export_paths[0])
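A hedged usage sketch for run_ilastik_stage(); paths are hypothetical. Stage 1 takes a raw grayscale zyx array, and the predictions land in the 'predictions' dataset inside the given hdf5 output file:

import numpy as np

input_vol = np.random.randint(0, 255, (64, 256, 256)).astype(np.uint8)  # zyx

run_ilastik_stage(1, '/path/to/stage1.ilp', input_vol, mask=None,
                  output_path='/tmp/stage1_output.h5',
                  LAZYFLOW_THREADS=4, LAZYFLOW_TOTAL_RAM_MB=4000)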
Code example #41
def generate_trained_loo_project_file(
    project_path,
    name_loo_img
):
    """
    Open an existing project file, zero out the labels of one image
    (leave-one-out), retrain the classifier, and save the project.

    project_path: Path to the existing project file (modified in place)
    name_loo_img: Nickname of the dataset whose labels should be excluded
                  from training
    """
    import numpy as np

    import ilastik_main
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    from lazyflow.roi import fullSlicing

    ##
    ## CREATE PROJECT
    ##

    # Manually configure the arguments to ilastik, as if they were parsed from the command line.
    # (Start with empty args and fill in below.)
    ilastik_args = ilastik_main.parse_args([])
    ilastik_args.project = project_path
    ilastik_args.headless = True
    ilastik_args.readonly = False

    shell = ilastik_main.main(ilastik_args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    ##
    ## CONFIGURE FILE PATHS
    ##

    data_selection_applet = shell.workflow.dataSelectionApplet

    opDataSelection = data_selection_applet.topLevelOperator

    existing_lanes = len(opDataSelection.DatasetGroup)
    # Not sure if assuming role_index = 0 is always valid
    role_index = 0

    cur_lane = None
    for lane, dataset in enumerate(opDataSelection.DatasetGroup):
        dat = dataset[role_index][0].wait()[0]
        if dat.nickname == name_loo_img:
            cur_lane = lane
            break

    if cur_lane is None:
        raise ValueError(f'{name_loo_img} not found in project.')

    # Delete the labels for this image by setting them all to 0
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    #label_input_slot = opPixelClassification.LabelInputs[cur_lane]
    #label_output_slot = opPixelClassification.LabelImages[cur_lane]
    #shape = label_output_slot.meta.shape
    #zero_labels = np.zeros(shape=shape, dtype=np.uint8)
    #label_input_slot[fullSlicing(shape)] = zero_labels
    #label_input_slot.setDirty()
    #label_output_slot.disconnect()
    #label_output_slot.setValue(zero_labels)
    #label_output_slot.setDirty()
    ##
    ## TRAIN CLASSIFIER
    ##

    # Make sure the caches in the pipeline are not 'frozen'.
    # (This is the equivalent of 'live update' mode in the GUI.)
    opPixelClassification.FreezePredictions.setValue(False)

    # Mark the classifier as dirty to force re-training it
    cur_labs = opPixelClassification.opTrain.Labels[cur_lane]
    up_lab = cur_labs.upstream_slot.upstream_slot.upstream_slot
    zero_labels = np.zeros(shape=up_lab.meta.shape, dtype=np.uint8)
    up_lab.setValue(zero_labels)
    up_lab.setDirty()
    #cur_labs.disconnect()
    #cur_labs.value[:] = 0
    #opPixelClassification.opTrain.ClassifierFactory.setDirty()
    # Request the classifier object from the pipeline.
    # This forces the pipeline to produce (train) the classifier.
    _ = opPixelClassification.Classifier.value

    ##
    ## SAVE PROJECT
    ##

    # save project file (includes the new classifier).
    shell.projectManager.saveProject(force_all_save=True)
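A hedged usage sketch; the nickname must match a dataset already present in the project, and the project file is modified in place (saved with force_all_save=True):

# Hypothetical path and dataset nickname.
generate_trained_loo_project_file('/path/to/MyProject.ilp', 'image_03')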
Code example #42
def runWorkflow(cluster_args):
    ilastik_main_args = ilastik_main.parser.parse_args([])
    # Copy relevant args from cluster cmdline options to ilastik_main cmdline options
    ilastik_main_args.headless = True
    ilastik_main_args.project = cluster_args.project
    ilastik_main_args.process_name = cluster_args.process_name

    # Nodes should not write to a common logfile.
    # Override with /dev/null
    if cluster_args._node_work_ is None:
        ilastik_main_args.logfile = cluster_args.logfile
    else:
        ilastik_main_args.logfile = "/dev/null"
    
    assert cluster_args.project is not None, "Didn't get a project file."
    
    # Read the config file
    configFilePath = cluster_args.option_config_file
    config = parseClusterConfigFile( configFilePath )

    # Update the monkey_patch settings
    ilastik.monkey_patches.apply_setting_dict( config.__dict__ )

    # Configure the thread count.
    # Nowadays, this is done via an environment variable setting for ilastik_main to detect.
    if cluster_args._node_work_ is not None and config.task_threadpool_size is not None:
        os.environ["LAZYFLOW_THREADS"] = str(config.task_threadpool_size)
    
    if cluster_args._node_work_ is not None and config.task_total_ram_mb is not None:
        os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(config.task_total_ram_mb)

    # Instantiate 'shell' by calling ilastik_main with our parsed args
    shell = ilastik_main.main( ilastik_main_args )
    workflow = shell.projectManager.workflow

    # Attach cluster operators
    resultSlot = None
    finalOutputSlot = workflow.getHeadlessOutputSlot( config.output_slot_id )
    assert finalOutputSlot is not None

    clusterOperator = None
    try:
        if cluster_args._node_work_ is not None:
            clusterOperator, resultSlot = prepare_node_cluster_operator(config, cluster_args, finalOutputSlot)
        else:
            clusterOperator, resultSlot = prepare_master_cluster_operator(cluster_args, finalOutputSlot)
        
        # Get the result
        logger.info("Starting task")
        result = resultSlot[0].value # FIXME: The image index is hard-coded here.
    finally:
        logger.info("Cleaning up")
        global stop_background_tasks
        stop_background_tasks = True
        
        try:
            if clusterOperator is not None:
                clusterOperator.cleanUp()
        except Exception:
            logger.error("Errors during cleanup.")

        try:
            logger.info("Closing project...")
            shell.closeCurrentProject()
        except Exception:
            logger.error("Errors while closing project.")
    
    logger.info("FINISHED with result {}".format(result))
    if not result:
        logger.error( "FAILED TO COMPLETE!" )
Code example #43
File: train_headless.py Project: wedexyz/ilastik
def generate_trained_project_file(new_project_path,
                                  raw_data_paths,
                                  label_data_paths,
                                  feature_selections,
                                  classifier_factory=None):
    """
    Create a new project file from scratch, add the given raw data files,
    inject the corresponding labels, configure the given feature selections,
    and (if provided) override the classifier type ('factory').

    Finally, request the classifier object from the pipeline (which forces training),
    and save the project.

    new_project_path: Where to save the new project file
    raw_data_paths: A list of paths to the raw data images to train with
    label_data_paths: A list of paths to the label image data to train with
    feature_selections: A matrix of bool, representing the selected features
    classifier_factory: Override the classifier type.  Must be a subclass of either:
                        - lazyflow.classifiers.LazyflowVectorwiseClassifierFactoryABC
                        - lazyflow.classifiers.LazyflowPixelwiseClassifierFactoryABC
    """
    assert len(raw_data_paths) == len(label_data_paths), "Number of label images must match number of raw images."

    import ilastik_main
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    from lazyflow.graph import Graph
    from lazyflow.operators.ioOperators import OpInputDataReader
    from lazyflow.roi import roiToSlice, roiFromShape

    ##
    ## CREATE PROJECT
    ##

    # Manually configure the arguments to ilastik, as if they were parsed from the command line.
    # (Start with empty args and fill in below.)
    ilastik_args = ilastik_main.parse_args([])
    ilastik_args.new_project = new_project_path
    ilastik_args.headless = True
    ilastik_args.workflow = "Pixel Classification"

    shell = ilastik_main.main(ilastik_args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    ##
    ## CONFIGURE GRAYSCALE INPUT
    ##

    data_selection_applet = shell.workflow.dataSelectionApplet

    # To configure data selection, start with empty cmdline args and manually fill them in
    data_selection_args, _ = data_selection_applet.parse_known_cmdline_args(
        [], PixelClassificationWorkflow.ROLE_NAMES)
    data_selection_args.raw_data = raw_data_paths
    data_selection_args.preconvert_stacks = True

    # Simplest thing here is to configure using cmd-line interface
    data_selection_applet.configure_operator_with_parsed_args(
        data_selection_args)

    ##
    ## APPLY FEATURE MATRIX (from matrix above)
    ##

    opFeatures = shell.workflow.featureSelectionApplet.topLevelOperator
    opFeatures.Scales.setValue(ScalesList)
    opFeatures.FeatureIds.setValue(FeatureIds)
    opFeatures.SelectionMatrix.setValue(feature_selections)

    ##
    ## CUSTOMIZE CLASSIFIER TYPE
    ##

    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    if classifier_factory is not None:
        opPixelClassification.ClassifierFactory.setValue(classifier_factory)

    ##
    ## READ/APPLY LABEL VOLUMES
    ##

    # Read each label volume and inject the label data into the appropriate training slot
    cwd = os.getcwd()
    max_label_class = 0
    for lane, label_data_path in enumerate(label_data_paths):
        graph = Graph()
        opReader = OpInputDataReader(graph=graph)
        try:
            opReader.WorkingDirectory.setValue(cwd)
            opReader.FilePath.setValue(label_data_path)

            print("Reading label volume: {}".format(label_data_path))
            label_volume = opReader.Output[:].wait()
        finally:
            opReader.cleanUp()

        raw_shape = opPixelClassification.InputImages[lane].meta.shape
        if label_volume.ndim != len(raw_shape):
            # Append a singleton channel axis
            assert label_volume.ndim == len(raw_shape) - 1
            label_volume = label_volume[..., None]

        # Auto-calculate the max label value
        max_label_class = max(max_label_class, label_volume.max())

        print("Applying label volume to lane #{}".format(lane))
        entire_volume_slicing = roiToSlice(*roiFromShape(label_volume.shape))
        opPixelClassification.LabelInputs[lane][entire_volume_slicing] = label_volume

    assert max_label_class > 1, "Not enough label classes were found in your label data."
    label_names = list(map(str, list(range(max_label_class))))
    opPixelClassification.LabelNames.setValue(label_names)

    ##
    ## TRAIN CLASSIFIER
    ##

    # Make sure the caches in the pipeline are not 'frozen'.
    # (This is the equivalent of 'live update' mode in the GUI.)
    opPixelClassification.FreezePredictions.setValue(False)

    # Request the classifier object from the pipeline.
    # This forces the pipeline to produce (train) the classifier.
    _ = opPixelClassification.Classifier.value

    ##
    ## SAVE PROJECT
    ##

    # save project file (includes the new classifier).
    shell.projectManager.saveProject(force_all_save=False)
Code example #44
File: train_headless.py Project: slzephyr/ilastik
import ilastik_main
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
from lazyflow.graph import Graph
from lazyflow.operators.ioOperators import OpInputDataReader
from lazyflow.roi import roiToSlice, roiFromShape

##
## CREATE PROJECT
##

# Manually configure the arguments to ilastik, as if they were parsed from the command line.
# (Start with empty args and fill in below.)
ilastik_args = ilastik_main.parser.parse_args([])
ilastik_args.new_project = NEW_PROJECT_NAME
ilastik_args.headless = True
ilastik_args.workflow = 'Pixel Classification'

shell = ilastik_main.main(ilastik_args)
assert isinstance(shell.workflow, PixelClassificationWorkflow)

##
## CONFIGURE GRAYSCALE INPUT
##

data_selection_applet = shell.workflow.dataSelectionApplet

# To configure data selection, start with empty cmdline args and manually fill them in
data_selection_args, _ = data_selection_applet.parse_known_cmdline_args([])
data_selection_args.raw_data = RAW_DATA_FILEPATHS
data_selection_args.preconvert_stacks = True

# Configure
data_selection_applet.configure_operator_with_parsed_args(data_selection_args)
Code example #45
import ilastik_main
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
from lazyflow.graph import Graph
from lazyflow.operators.ioOperators import OpInputDataReader
from lazyflow.roi import roiToSlice, roiFromShape

##
## CREATE PROJECT
##

# Manually configure the arguments to ilastik, as if they were parsed from the command line.
# (Start with empty args and fill in below.)
ilastik_args = ilastik_main.parser.parse_args([])
ilastik_args.new_project = NEW_PROJECT_NAME
ilastik_args.headless = True
ilastik_args.workflow = 'Pixel Classification'

shell = ilastik_main.main( ilastik_args )
assert isinstance(shell.workflow, PixelClassificationWorkflow)

##
## CONFIGURE GRAYSCALE INPUT
##

data_selection_applet = shell.workflow.dataSelectionApplet

# To configure data selection, start with empty cmdline args and manually fill them in
data_selection_args, _ = data_selection_applet.parse_known_cmdline_args([], PixelClassificationWorkflow.ROLE_NAMES)
data_selection_args.raw_data = RAW_DATA_FILEPATHS
data_selection_args.preconvert_stacks = True

# Configure 
data_selection_applet.configure_operator_with_parsed_args(data_selection_args)
Code example #46
import os
from collections import OrderedDict

import vigra

import ilastik_main
from ilastik.applets.dataSelection import DatasetInfo
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow


def classify_pixel_hdf(hdf_data_set_name, classifier, threads, ram):
    """
    Runs a pre-trained ilastik classifier on a volume of data given in an hdf5 file.
    Adapted from Stuart Berg's example here:
    https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py

    Parameters:
        hdf_data_set_name: path to the hdf5 dataset to be classified
        classifier: path to the trained ilastik project file (.ilp)
        threads: number of threads to use for classifying the input data
        ram: RAM to use, in MB

    Returns:
        hdf_dataset_path: path(s) of the exported prediction dataset(s) on disk
    """

    # Before we start ilastik, prepare these environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(threads)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)

    # Set the command-line arguments directly into argparse.Namespace object
    # Provide your project file, and don't forget to specify headless.
    args = ilastik_main.parser.parse_args([])
    args.headless = True
    args.project = classifier

    # Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    # Obtain the training operator
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator

    # Sanity checks
    assert len(opPixelClassification.InputImages) > 0
    assert opPixelClassification.Classifier.ready()

    # In case you're curious about which label class is which,
    # let's read the label names from the project file.
    label_names = opPixelClassification.LabelNames.value
    label_colors = opPixelClassification.LabelColors.value
    probability_colors = opPixelClassification.PmapColors.value

    print("label_names, label_colors, probability_colors", label_names,
          label_colors, probability_colors)

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    data_info = DatasetInfo(hdf_data_set_name)
    data_info.axistags = vigra.defaultAxistags('tyx')
    role_data_dict = OrderedDict([("Raw Data", [data_info])])
    # Run the export via the BatchProcessingApplet
    # Note: If you don't provide export_to_array, then the results will
    #       be exported to disk according to project's DataExport settings.
    #       In that case, run_export() returns None.

    hdf_dataset_path = shell.workflow.batchProcessingApplet.run_export(
        role_data_dict, export_to_array=False)

    print("DONE WITH CLASSIFICATION.")

    return hdf_dataset_path
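A hedged usage sketch for classify_pixel_hdf(); paths are hypothetical. Because run_export() is called with export_to_array=False, the predictions are written to disk according to the project's Data Export settings and the export path(s) are returned:

export_paths = classify_pixel_hdf('/data/timelapse.h5/volume',  # hypothetical hdf5 dataset path
                                  '/path/to/MyProject.ilp',     # hypothetical trained project
                                  threads=4, ram=4000)
print(export_paths)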