def testUsingPreloadedArraysWhenScriptingBatchProcessing(self):
        args = app.parse_args([])
        args.headless = True
        args.project = self.PROJECT_FILE
        shell = app.main(args)
        assert isinstance(shell.workflow, PixelClassificationWorkflow)

        # Obtain the training operator
        opPixelClassification = shell.workflow.pcApplet.topLevelOperator

        # Sanity checks
        assert len(opPixelClassification.InputImages) > 0
        assert opPixelClassification.Classifier.ready()

        input_data1 = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(numpy.uint8)
        input_data2 = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(numpy.uint8)

        role_data_dict = {
            "Raw Data": [
                PreloadedArrayDatasetInfo(preloaded_array=input_data1, axistags=vigra.AxisTags("tzyxc")),
                PreloadedArrayDatasetInfo(preloaded_array=input_data2, axistags=vigra.AxisTags("tzyxc")),
            ]
        }

        predictions = shell.workflow.batchProcessingApplet.run_export(role_data_dict, export_to_array=True)
        for result in predictions:
            assert result.shape == (2, 20, 20, 5, 2)
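
If the exported arrays need to be persisted rather than just shape-checked, a
minimal follow-up sketch (the file names are illustrative, not from the test):

import numpy

# Save each lane's prediction array to disk (hypothetical file names).
for lane, result in enumerate(predictions):
    numpy.save("predictions_lane%d.npy" % lane, result)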
def analyze(conn, images, model, new_dataset, extension=".tar", resolution=0):
    # Prepare ilastik
    # Temporary directory to download files into.
    # tempfile.mkdtemp() already creates the directory, so no existence check is needed.
    path = tempfile.mkdtemp()

    os.environ["LAZYFLOW_THREADS"] = "2"
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"
    args = app.parse_args([])
    args.headless = True
    args.project = model
    args.readonly = True
    shell = app.main(args)

    start = time.time()
    for image in images:
        input_data = load_from_s3(image, path)
        # run ilastik headless
        print('running ilastik using %s and %s' % (model, image.getName()))
        data = OrderedDict([(
            "Raw Data",
            [PreloadedArrayDatasetInfo(preloaded_array=input_data)],
        )])
        shell.workflow.batchProcessingApplet.run_export(
            data, export_to_array=True)  # noqa
    elapsed = time.time() - start
    print(elapsed)
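
load_from_s3 is a helper assumed by this example; its definition is not shown
here. A plausible sketch using boto3, assuming the objects resolve to .npy
files (the bucket name and key scheme are hypothetical, and the real helper
presumably also handles the ".tar" extension from the function signature):

import os
import boto3
import numpy


def load_from_s3(image, path):
    # Hypothetical: derive the object key from the OMERO image id.
    s3 = boto3.client("s3")
    local_file = os.path.join(path, "%s.npy" % image.getId())
    s3.download_file("my-bucket", "%s.npy" % image.getId(), local_file)
    return numpy.load(local_file)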
Example 3
def analyze(image_id, model):
    args = app.parse_args([])
    args.headless = True
    args.project = model
    args.readonly = True
    shell = app.main(args)
    input_data = load_from_s3(image_id)
    # run ilastik headless
    data = [{
        "Raw Data":
        PreloadedArrayDatasetInfo(preloaded_array=input_data,
                                  axistags=vigra.defaultAxistags("tzyxc"))
    }]  # noqa
    return shell.workflow.batchProcessingApplet.run_export(
        data, export_to_array=True)  # noqa
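
run_export with export_to_array=True returns one array per lane, typically
per-class probabilities for pixel classification. A small sketch turning them
into label images (the call to analyze is illustrative; the channel axis is
last under the "tzyxc" ordering used above):

import numpy

predictions = analyze(image_id, model)
for probs in predictions:
    # argmax over the channel axis picks the most likely class per pixel.
    segmentation = numpy.argmax(probs, axis=-1).astype(numpy.uint8)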
Example 4
def analyze(conn, images, model, new_dataset):
    # Prepare ilastik
    os.environ["LAZYFLOW_THREADS"] = "2"
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"
    args = app.parse_args([])
    args.headless = True
    args.project = model
    shell = app.main(args)
    for image in images:
        input_data = load_numpy_array(image)
        # run ilastik headless
        print('running ilastik using %s and %s' % (model, image.getName()))
        data = [{"Raw Data": PreloadedArrayDatasetInfo(
            preloaded_array=input_data,
            axistags=vigra.defaultAxistags("tzyxc"))}]  # noqa
        predictions = shell.workflow.batchProcessingApplet.run_export(data,
                                                                      export_to_array=True)  # noqa
        for d in predictions:
            save_results(conn, image, d, new_dataset)
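
load_numpy_array and save_results are helpers assumed by this example. One
plausible sketch of the loader using the OMERO BlitzGateway pixels API,
building a "tzyxc"-shaped array (single channel for brevity; this is an
assumption, not the original implementation):

import numpy


def load_numpy_array(image):
    pixels = image.getPrimaryPixels()
    planes = []
    for t in range(image.getSizeT()):
        for z in range(image.getSizeZ()):
            planes.append(pixels.getPlane(z, 0, t))  # (theZ, theC, theT)
    array = numpy.array(planes).reshape(
        image.getSizeT(), image.getSizeZ(), image.getSizeY(), image.getSizeX())
    return array[..., numpy.newaxis]  # append a singleton channel axis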
Example 5
def generate_trained_project_file(new_project_path,
                                  raw_data_paths,
                                  label_data_paths,
                                  feature_selections,
                                  classifier_factory=None):
    """
    Create a new project file from scratch, add the given raw data files,
    inject the corresponding labels, configure the given feature selections,
    and (if provided) override the classifier type ('factory').

    Finally, request the classifier object from the pipeline (which forces training),
    and save the project.

    new_project_path: Where to save the new project file
    raw_data_paths: A list of paths to the raw data images to train with
    label_data_paths: A list of paths to the label image data to train with
    feature_selections: A matrix of bool, representing the selected features
    classifier_factory: Override the classifier type.  Must be a subclass of either:
                        - lazyflow.classifiers.LazyflowVectorwiseClassifierFactoryABC
                        - lazyflow.classifiers.LazyflowPixelwiseClassifierFactoryABC
    """
    assert len(raw_data_paths) == len(
        label_data_paths
    ), "Number of label images must match number of raw images."

    from ilastik import app
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    from lazyflow.graph import Graph
    from lazyflow.operators.ioOperators import OpInputDataReader
    from lazyflow.roi import roiToSlice, roiFromShape

    ##
    ## CREATE PROJECT
    ##

    # Manually configure the arguments to ilastik, as if they were parsed from the command line.
    # (Start with empty args and fill in below.)
    ilastik_args = app.parse_args([])
    ilastik_args.new_project = new_project_path
    ilastik_args.headless = True
    ilastik_args.workflow = "Pixel Classification"

    shell = app.main(ilastik_args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    ##
    ## CONFIGURE GRAYSCALE INPUT
    ##

    data_selection_applet = shell.workflow.dataSelectionApplet

    # To configure data selection, start with empty cmdline args and manually fill them in
    data_selection_args, _ = data_selection_applet.parse_known_cmdline_args(
        [], PixelClassificationWorkflow.ROLE_NAMES)
    data_selection_args.raw_data = raw_data_paths
    data_selection_args.preconvert_stacks = True

    # Simplest thing here is to configure using cmd-line interface
    data_selection_applet.configure_operator_with_parsed_args(
        data_selection_args)

    ##
    ## APPLY FEATURE MATRIX (from the feature_selections argument)
    ##

    # Note: ScalesList and FeatureIds are assumed to be module-level constants
    # (e.g., ilastik's default feature scales and feature-id strings).
    opFeatures = shell.workflow.featureSelectionApplet.topLevelOperator
    opFeatures.Scales.setValue(ScalesList)
    opFeatures.FeatureIds.setValue(FeatureIds)
    opFeatures.SelectionMatrix.setValue(feature_selections)

    ##
    ## CUSTOMIZE CLASSIFIER TYPE
    ##

    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    if classifier_factory is not None:
        opPixelClassification.ClassifierFactory.setValue(classifier_factory)

    ##
    ## READ/APPLY LABEL VOLUMES
    ##

    # Read each label volume and inject the label data into the appropriate training slot
    cwd = os.getcwd()
    max_label_class = 0
    for lane, label_data_path in enumerate(label_data_paths):
        graph = Graph()
        opReader = OpInputDataReader(graph=graph)
        try:
            opReader.WorkingDirectory.setValue(cwd)
            opReader.FilePath.setValue(label_data_path)

            print("Reading label volume: {}".format(label_data_path))
            label_volume = opReader.Output[:].wait()
        finally:
            opReader.cleanUp()

        raw_shape = opPixelClassification.InputImages[lane].meta.shape
        if label_volume.ndim != len(raw_shape):
            # Append a singleton channel axis
            assert label_volume.ndim == len(raw_shape) - 1
            label_volume = label_volume[..., None]

        # Auto-calculate the max label value
        max_label_class = max(max_label_class, label_volume.max())

        print("Applying label volume to lane #{}".format(lane))
        entire_volume_slicing = roiToSlice(*roiFromShape(label_volume.shape))
        opPixelClassification.LabelInputs[lane][
            entire_volume_slicing] = label_volume

    assert max_label_class > 1, "Not enough label classes were found in your label data."
    label_names = list(map(str, range(max_label_class)))
    opPixelClassification.LabelNames.setValue(label_names)

    ##
    ## TRAIN CLASSIFIER
    ##

    # Make sure the caches in the pipeline are not 'frozen'.
    # (This is the equivalent of 'live update' mode in the GUI.)
    opPixelClassification.FreezePredictions.setValue(False)

    # Request the classifier object from the pipeline.
    # This forces the pipeline to produce (train) the classifier.
    _ = opPixelClassification.Classifier.value

    ##
    ## SAVE PROJECT
    ##

    # save project file (includes the new classifier).
    shell.projectManager.saveProject(force_all_save=False)
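
A hypothetical call to the function above. Note that ScalesList and FeatureIds
must exist at module level (the function references them); with ilastik's
default 6 feature ids and 7 scales, the selection matrix is 6x7:

import numpy

# Select every feature at every scale (illustrative; tune for real projects).
feature_selections = numpy.ones((6, 7), dtype=bool)

generate_trained_project_file(
    "MyNewProject.ilp",              # hypothetical output path
    ["raw1.png", "raw2.png"],        # hypothetical raw images
    ["labels1.png", "labels2.png"],  # hypothetical label images
    feature_selections,
)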
Example 6

def runWorkflow(cluster_args):
    ilastik_main_args = app.parse_args([])
    # Copy relevant args from cluster cmdline options to app cmdline options
    ilastik_main_args.headless = True
    ilastik_main_args.project = cluster_args.project
    ilastik_main_args.process_name = cluster_args.process_name

    # Nodes should not write to a common logfile.
    # Override with /dev/null
    if cluster_args._node_work_ is None:
        ilastik_main_args.logfile = cluster_args.logfile
    else:
        ilastik_main_args.logfile = "/dev/null"

    assert cluster_args.project is not None, "Didn't get a project file."

    # Read the config file
    configFilePath = cluster_args.option_config_file
    config = parseClusterConfigFile(configFilePath)

    # Update the monkey_patch settings
    ilastik.monkey_patches.apply_setting_dict(config.__dict__)

    # Configure the thread count.
    # Nowadays, this is done via an environment variable setting for app to detect.
    if cluster_args._node_work_ is not None and config.task_threadpool_size is not None:
        os.environ["LAZYFLOW_THREADS"] = str(config.task_threadpool_size)

    if cluster_args._node_work_ is not None and config.task_total_ram_mb is not None:
        os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(config.task_total_ram_mb)

    # Instantiate the 'shell' by calling app.main with the args configured above.
    shell = app.main(ilastik_main_args)
    workflow = shell.projectManager.workflow

    # Attach cluster operators
    resultSlot = None
    finalOutputSlot = workflow.getHeadlessOutputSlot(config.output_slot_id)
    assert finalOutputSlot is not None

    clusterOperator = None
    try:
        if cluster_args._node_work_ is not None:
            clusterOperator, resultSlot = prepare_node_cluster_operator(
                config, cluster_args, finalOutputSlot)
        else:
            clusterOperator, resultSlot = prepare_master_cluster_operator(
                cluster_args, finalOutputSlot)

        # Get the result
        logger.info("Starting task")
        # FIXME: The image index is hard-coded here.
        result = resultSlot[0].value
    finally:
        logger.info("Cleaning up")
        global stop_background_tasks
        stop_background_tasks = True

        try:
            if clusterOperator is not None:
                clusterOperator.cleanUp()
        except Exception:
            logger.exception("Errors during cleanup.")

        try:
            logger.info("Closing project...")
            shell.closeCurrentProject()
        except Exception:
            logger.exception("Errors while closing project.")

    logger.info("FINISHED with result {}".format(result))
    if not result:
        logger.error("FAILED TO COMPLETE!")
Example 7
import os
from collections import OrderedDict

import numpy
import vigra

from ilastik import app
from ilastik.applets.dataSelection import DatasetInfo, PreloadedArrayDatasetInfo
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow

# Before we start ilastik, optionally prepare these environment variable settings.
os.environ["LAZYFLOW_THREADS"] = "2"
os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"

# Programmatically set the command-line arguments directly into the argparse.Namespace object
# Provide your project file, and don't forget to specify headless.
args = app.parse_args([])
args.headless = True
args.project = "/Users/bergs/MyProject.ilp"  # REPLACE WITH YOUR PROJECT FILE

# Instantiate the 'shell' (in this case, an instance of ilastik.shell.HeadlessShell).
# This also loads the project file into shell.projectManager
shell = app.main(args)
assert isinstance(shell.workflow, PixelClassificationWorkflow)

# Obtain the training operator
opPixelClassification = shell.workflow.pcApplet.topLevelOperator

# Sanity checks
assert len(opPixelClassification.InputImages) > 0
assert opPixelClassification.Classifier.ready()
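
The snippet ends at the sanity checks. A minimal continuation, mirroring the
batch-processing test at the top of this page (the input shape and dtype are
illustrative assumptions):

input_data = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(numpy.uint8)

role_data_dict = OrderedDict([
    ("Raw Data", [PreloadedArrayDatasetInfo(
        preloaded_array=input_data,
        axistags=vigra.defaultAxistags("tzyxc"))]),
])

# Returns one prediction array per lane; the channel count equals the
# number of label classes in the project.
predictions = shell.workflow.batchProcessingApplet.run_export(
    role_data_dict, export_to_array=True)
for prediction in predictions:
    print(prediction.shape)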