Example #1
def main():
    from ilastik import app

    parsed_args, workflow_cmdline_args = app.parse_known_args()

    hShell = app.main(parsed_args, workflow_cmdline_args)
    # In headless mode the headless shell is returned, and its project manager still has an open project file.
    hShell.closeCurrentProject()
Example #2
    def testUsingPreloadedArraysWhenScriptingBatchProcessing(self):
        args = app.parse_args([])
        args.headless = True
        args.project = self.PROJECT_FILE
        shell = app.main(args)
        assert isinstance(shell.workflow, PixelClassificationWorkflow)

        # Obtain the training operator
        opPixelClassification = shell.workflow.pcApplet.topLevelOperator

        # Sanity checks
        assert len(opPixelClassification.InputImages) > 0
        assert opPixelClassification.Classifier.ready()

        input_data1 = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(numpy.uint8)
        input_data2 = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(numpy.uint8)

        role_data_dict = {
            "Raw Data": [
                PreloadedArrayDatasetInfo(preloaded_array=input_data1, axistags=vigra.defaultAxistags("tzyxc")),
                PreloadedArrayDatasetInfo(preloaded_array=input_data2, axistags=vigra.defaultAxistags("tzyxc")),
            ]
        }

        predictions = shell.workflow.batchProcessingApplet.run_export(role_data_dict, export_to_array=True)
        for result in predictions:
            assert result.shape == (2, 20, 20, 5, 2)
Example #3
def analyze(conn, images, model, new_dataset, extension=".tar", resolution=0):
    # Prepare ilastik
    # temporary directory to download files into (mkdtemp creates it)
    path = tempfile.mkdtemp()

    os.environ["LAZYFLOW_THREADS"] = "2"
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"
    args = app.parse_args([])
    args.headless = True
    args.project = model
    args.readonly = True
    shell = app.main(args)

    start = time.time()
    for image in images:
        input_data = load_from_s3(image, path)
        # run ilastik headless
        print('running ilastik using %s and %s' % (model, image.getName()))
        data = OrderedDict([(
            "Raw Data",
            [PreloadedArrayDatasetInfo(preloaded_array=input_data)],
        )])
        shell.workflow.batchProcessingApplet.run_export(
            data, export_to_array=True)  # noqa
    elapsed = time.time() - start
    print(elapsed)
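Both this example and Example #6 call a load_from_s3 helper that is not shown. A minimal sketch of the two-argument variant used here, assuming boto3 and tifffile; the bucket name and key layout are illustrative assumptions, not from the original script:

import os

import boto3
import tifffile


def load_from_s3(image, path):
    # Hypothetical sketch: fetch the image's backing file from a public S3
    # bucket into `path`, then read it into a numpy array.
    # The bucket name and key layout are assumptions.
    s3 = boto3.client("s3")
    local_file = os.path.join(path, image.getName())
    s3.download_file("idr-bucket", image.getName(), local_file)
    return tifffile.imread(local_file)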
Example #4
    def start_workflow_create_project_headless(self, workflow_class_tuple,
                                               temp_dir):
        """Tests project file creation via the command line
        Args:
            workflow_class_tuple (tuple): tuple returned from getAvailableWorkflows
              with (workflow_class, workflow_name, workflow_class.workflowDisplayName)
        """
        workflow_class, workflow_name, display_name = workflow_class_tuple
        logger.debug(f"starting {workflow_name}")
        project_file = generate_project_file_name(temp_dir, workflow_name)

        args = [
            "--headless", f"--new_project={project_file}",
            f"--workflow={workflow_name}"
        ]
        # Clear the existing commandline args so it looks like we're starting fresh.
        sys.argv = ["ilastik.py"]
        sys.argv.extend(args)

        # Start up the ilastik.py entry script as if we had launched it from the command line
        parsed_args, workflow_cmdline_args = app.parse_known_args()

        shell = app.main(parsed_args=parsed_args,
                         workflow_cmdline_args=workflow_cmdline_args,
                         init_logging=False)

        shell.closeCurrentProject()

        # now check if the project file has been created:
        assert os.path.exists(
            project_file
        ), f"Project file {project_file} was not created"
Example #5
def test_133_pc_oc_loading(project_path: pathlib.Path):
    args = ["--headless", f"--project={project_path}"]
    # Clear the existing commandline args so it looks like we're starting fresh.
    sys.argv = ["ilastik.py"]
    sys.argv.extend(args)

    # Start up the ilastik.py entry script as if we had launched it from the command line
    parsed_args, workflow_cmdline_args = app.parse_known_args()

    shell = app.main(parsed_args=parsed_args,
                     workflow_cmdline_args=workflow_cmdline_args,
                     init_logging=False)

    shell.closeCurrentProject()
Example #6
def analyze(image_id, model):
    args = app.parse_args([])
    args.headless = True
    args.project = model
    args.readonly = True
    shell = app.main(args)
    input_data = load_from_s3(image_id)
    # run ilastik headless
    data = [{
        "Raw Data": PreloadedArrayDatasetInfo(
            preloaded_array=input_data,
            axistags=vigra.defaultAxistags("tzyxc"),
        )
    }]
    return shell.workflow.batchProcessingApplet.run_export(
        data, export_to_array=True)  # noqa
Example #7
def analyze(conn, images, model, new_dataset):
    # Prepare ilastik
    os.environ["LAZYFLOW_THREADS"] = "2"
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"
    args = app.parse_args([])
    args.headless = True
    args.project = model
    shell = app.main(args)
    for image in images:
        input_data = load_numpy_array(image)
        # run ilastik headless
        print('running ilastik using %s and %s' % (model, image.getName()))
        data = [ {"Raw Data": PreloadedArrayDatasetInfo(preloaded_array=input_data, axistags=vigra.defaultAxistags("tzyxc"))}]  # noqa
        predictions = shell.workflow.batchProcessingApplet.run_export(data,
                                                                      export_to_array=True)  # noqa
        for d in predictions:
            save_results(conn, image, d, new_dataset)
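The save_results helper is not shown either. A rough sketch, assuming conn is an OMERO BlitzGateway connection and that the prediction array keeps the tzyxc ordering of the input; the image name suffix and plane handling are assumptions, not the original helper:

def save_results(conn, image, data, new_dataset):
    # Hypothetical sketch: write the prediction array back to OMERO as a
    # new image in `new_dataset`. `data` is assumed to be ordered tzyxc.
    size_t, size_z, size_y, size_x, size_c = data.shape

    def plane_gen():
        # createImageFromNumpySeq expects 2D planes in z, c, t order.
        for z in range(size_z):
            for c in range(size_c):
                for t in range(size_t):
                    yield data[t, z, :, :, c]

    name = "%s_Probabilities" % image.getName()
    conn.createImageFromNumpySeq(
        plane_gen(), name, size_z, size_c, size_t, dataset=new_dataset)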
Example #8
def main():
    if "--clean_paths" in sys.argv:
        script_dir = pathlib.Path(__file__).parent
        ilastik_root = script_dir.parent.parent
        _clean_paths(ilastik_root)

    # Allow start-up by double-clicking a project file.
    if len(sys.argv) == 2 and sys.argv[1].endswith(".ilp"):
        sys.argv.insert(1, "--project")

    arg_opts, env_vars = _parse_internal_config("internal-startup-options.cfg")
    sys.argv[1:1] = arg_opts
    os.environ.update(env_vars)

    from ilastik import app

    parsed_args, workflow_cmdline_args = app.parse_known_args()

    hShell = app.main(parsed_args, workflow_cmdline_args)
    # In headless mode the headless shell is returned, and its project manager still has an open project file.
    hShell.closeCurrentProject()
Example #9
def generate_trained_project_file(new_project_path,
                                  raw_data_paths,
                                  label_data_paths,
                                  feature_selections,
                                  classifier_factory=None):
    """
    Create a new project file from scratch, add the given raw data files,
    inject the corresponding labels, configure the given feature selections,
    and (if provided) override the classifier type ('factory').

    Finally, request the classifier object from the pipeline (which forces training),
    and save the project.

    new_project_path: Where to save the new project file
    raw_data_paths: A list of paths to the raw data images to train with
    label_data_paths: A list of paths to the label image data to train with
    feature_selections: A matrix of bool, representing the selected features
    classifier_factory: Override the classifier type.  Must be a subclass of either:
                        - lazyflow.classifiers.LazyflowVectorwiseClassifierFactoryABC
                        - lazyflow.classifiers.LazyflowPixelwiseClassifierFactoryABC
    """
    assert len(raw_data_paths) == len(
        label_data_paths
    ), "Number of label images must match number of raw images."

    from ilastik import app
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    from lazyflow.graph import Graph
    from lazyflow.operators.ioOperators import OpInputDataReader
    from lazyflow.roi import roiToSlice, roiFromShape

    ##
    ## CREATE PROJECT
    ##

    # Manually configure the arguments to ilastik, as if they were parsed from the command line.
    # (Start with empty args and fill in below.)
    ilastik_args = app.parse_args([])
    ilastik_args.new_project = new_project_path
    ilastik_args.headless = True
    ilastik_args.workflow = "Pixel Classification"

    shell = app.main(ilastik_args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    ##
    ## CONFIGURE GRAYSCALE INPUT
    ##

    data_selection_applet = shell.workflow.dataSelectionApplet

    # To configure data selection, start with empty cmdline args and manually fill them in
    data_selection_args, _ = data_selection_applet.parse_known_cmdline_args(
        [], PixelClassificationWorkflow.ROLE_NAMES)
    data_selection_args.raw_data = raw_data_paths
    data_selection_args.preconvert_stacks = True

    # Simplest thing here is to configure using cmd-line interface
    data_selection_applet.configure_operator_with_parsed_args(
        data_selection_args)

    ##
    ## APPLY FEATURE MATRIX (from the feature_selections argument)
    ##

    opFeatures = shell.workflow.featureSelectionApplet.topLevelOperator
    # ScalesList and FeatureIds are module-level constants (defined elsewhere
    # in the original script); feature_selections must have one row per
    # feature id and one column per scale.
    opFeatures.Scales.setValue(ScalesList)
    opFeatures.FeatureIds.setValue(FeatureIds)
    opFeatures.SelectionMatrix.setValue(feature_selections)

    ##
    ## CUSTOMIZE CLASSIFIER TYPE
    ##

    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    if classifier_factory is not None:
        opPixelClassification.ClassifierFactory.setValue(classifier_factory)

    ##
    ## READ/APPLY LABEL VOLUMES
    ##

    # Read each label volume and inject the label data into the appropriate training slot
    cwd = os.getcwd()
    max_label_class = 0
    for lane, label_data_path in enumerate(label_data_paths):
        graph = Graph()
        opReader = OpInputDataReader(graph=graph)
        try:
            opReader.WorkingDirectory.setValue(cwd)
            opReader.FilePath.setValue(label_data_path)

            print("Reading label volume: {}".format(label_data_path))
            label_volume = opReader.Output[:].wait()
        finally:
            opReader.cleanUp()

        raw_shape = opPixelClassification.InputImages[lane].meta.shape
        if label_volume.ndim != len(raw_shape):
            # Append a singleton channel axis
            assert label_volume.ndim == len(raw_shape) - 1
            label_volume = label_volume[..., None]

        # Auto-calculate the max label value
        max_label_class = max(max_label_class, label_volume.max())

        print("Applying label volume to lane #{}".format(lane))
        entire_volume_slicing = roiToSlice(*roiFromShape(label_volume.shape))
        opPixelClassification.LabelInputs[lane][
            entire_volume_slicing] = label_volume

    assert max_label_class > 1, "Not enough label classes were found in your label data."
    label_names = list(map(str, range(max_label_class)))
    opPixelClassification.LabelNames.setValue(label_names)

    ##
    ## TRAIN CLASSIFIER
    ##

    # Make sure the caches in the pipeline are not 'frozen'.
    # (This is the equivalent of 'live update' mode in the GUI.)
    opPixelClassification.FreezePredictions.setValue(False)

    # Request the classifier object from the pipeline.
    # This forces the pipeline to produce (train) the classifier.
    _ = opPixelClassification.Classifier.value

    ##
    ## SAVE PROJECT
    ##

    # save project file (includes the new classifier).
    shell.projectManager.saveProject(force_all_save=False)
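A usage sketch for the function above. The file names and the matrix contents are illustrative assumptions; the matrix shape is assumed to be len(FeatureIds) rows by len(ScalesList) columns (6 x 7 with ilastik's default feature ids and scales):

import numpy

# Illustrative call; paths are placeholders, not from the original script.
feature_selections = numpy.zeros((6, 7), dtype=bool)
feature_selections[0, :3] = True  # e.g. Gaussian Smoothing at the three finest scales

generate_trained_project_file(
    "MyNewProject.ilp",
    raw_data_paths=["raw1.png", "raw2.png"],
    label_data_paths=["labels1.png", "labels2.png"],
    feature_selections=feature_selections,
)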
Example #10
def runWorkflow(cluster_args):
    ilastik_main_args = app.parse_args([])
    # Copy relevant args from cluster cmdline options to app cmdline options
    ilastik_main_args.headless = True
    ilastik_main_args.project = cluster_args.project
    ilastik_main_args.process_name = cluster_args.process_name

    # Nodes should not write to a common logfile.
    # Override with /dev/null
    if cluster_args._node_work_ is None:
        ilastik_main_args.logfile = cluster_args.logfile
    else:
        ilastik_main_args.logfile = "/dev/null"

    assert cluster_args.project is not None, "Didn't get a project file."

    # Read the config file
    configFilePath = cluster_args.option_config_file
    config = parseClusterConfigFile(configFilePath)

    # Update the monkey_patch settings
    ilastik.monkey_patches.apply_setting_dict(config.__dict__)

    # Configure the thread count.
    # Nowadays, this is done via an environment variable setting for app to detect.
    if cluster_args._node_work_ is not None and config.task_threadpool_size is not None:
        os.environ["LAZYFLOW_THREADS"] = str(config.task_threadpool_size)

    if cluster_args._node_work_ is not None and config.task_total_ram_mb is not None:
        os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(config.task_total_ram_mb)

    # Instantiate the shell by calling app.main() with our parsed arguments.
    shell = app.main(ilastik_main_args)
    workflow = shell.projectManager.workflow

    # Attach cluster operators
    resultSlot = None
    finalOutputSlot = workflow.getHeadlessOutputSlot(config.output_slot_id)
    assert finalOutputSlot is not None

    clusterOperator = None
    try:
        if cluster_args._node_work_ is not None:
            clusterOperator, resultSlot = prepare_node_cluster_operator(
                config, cluster_args, finalOutputSlot)
        else:
            clusterOperator, resultSlot = prepare_master_cluster_operator(
                cluster_args, finalOutputSlot)

        # Get the result
        logger.info("Starting task")
        result = resultSlot[0].value  # FIXME: The image index is hard-coded here.
    finally:
        logger.info("Cleaning up")
        global stop_background_tasks
        stop_background_tasks = True

        try:
            if clusterOperator is not None:
                clusterOperator.cleanUp()
        except Exception:
            logger.exception("Errors during cleanup.")

        try:
            logger.info("Closing project...")
            shell.closeCurrentProject()
        except Exception:
            logger.exception("Errors while closing project.")

    logger.info("FINISHED with result {}".format(result))
    if not result:
        logger.error("FAILED TO COMPLETE!")
Example #11
import os

import numpy

from ilastik import app
from ilastik.applets.dataSelection import DatasetInfo, PreloadedArrayDatasetInfo
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow

# Before we start ilastik, optionally prepare these environment variable settings.
os.environ["LAZYFLOW_THREADS"] = "2"
os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"

# Programmatically set the command-line arguments directly into the argparse.Namespace object
# Provide your project file, and don't forget to specify headless.
args = app.parse_args([])
args.headless = True
args.project = "/Users/bergs/MyProject.ilp"  # REPLACE WITH YOUR PROJECT FILE

# Instantiate the 'shell' (in this case, an instance of ilastik.shell.HeadlessShell).
# This also loads the project file into shell.projectManager.
shell = app.main(args)
assert isinstance(shell.workflow, PixelClassificationWorkflow)

# Obtain the training operator
opPixelClassification = shell.workflow.pcApplet.topLevelOperator

# Sanity checks
assert len(opPixelClassification.InputImages) > 0
assert opPixelClassification.Classifier.ready()

# For this example, we'll use random input data to "batch process"
input_data1 = numpy.random.randint(0, 255, (200, 200, 1)).astype(numpy.uint8)
input_data2 = numpy.random.randint(0, 255, (300, 300, 1)).astype(numpy.uint8)
print(input_data1.shape)

# In this example, we're using 2D data (with an extra dimension for channel).
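The snippet is truncated at this point; based on the pattern in Example #1, the batch-processing call would plausibly continue along these lines (a sketch, not the original ending):

# Build the role/data mapping and run batch processing on the two arrays.
# A plain dict preserves insertion order on Python 3.7+.
role_data_dict = {
    "Raw Data": [
        PreloadedArrayDatasetInfo(preloaded_array=input_data1),
        PreloadedArrayDatasetInfo(preloaded_array=input_data2),
    ]
}

predictions = shell.workflow.batchProcessingApplet.run_export(
    role_data_dict, export_to_array=True)
for prediction in predictions:
    print(prediction.shape)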