# Imports as used by the ilastik headless examples; exact module paths may
# differ between ilastik versions, so treat this block as an assumption.
import errno
import logging
import os
import tempfile
from collections import OrderedDict

import vigra

import ilastik_main
from ilastik.applets.dataSelection import DataSelectionApplet, DatasetInfo
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
from lazyflow.utility.pathHelpers import PathComponents, isUrl

logger = logging.getLogger(__name__)


def _append_lane(workflow, input_filepath, axisorder=None):
    """
    Add a lane to the project file for the given input file.

    If axisorder is given, override the default axisorder for
    the file and force the project to use the given one.

    Globstrings are supported, in which case the files are converted to HDF5 first.
    """
    # If the filepath is a globstring, convert the stack to h5  # todo: skip this?
    tmp_dir = tempfile.mkdtemp()
    input_filepath = DataSelectionApplet.convertStacksToH5([input_filepath],
                                                           tmp_dir)[0]

    # convertStacksToH5 only populates tmp_dir when the input was a globstring;
    # otherwise the directory stays empty and can be removed right away.
    try:
        os.rmdir(tmp_dir)
    except OSError as e:
        if e.errno == errno.ENOTEMPTY:
            logger.warning(
                'Temporary directory {} was populated: '
                'should be deleted'.format(tmp_dir))
        else:
            raise

    info = DatasetInfo()
    info.location = DatasetInfo.Location.FileSystem
    info.filePath = input_filepath

    comp = PathComponents(input_filepath)

    # Convert all (non-url) paths to absolute
    # (otherwise they are relative to the project file, which probably isn't what the user meant)
    if not isUrl(input_filepath):
        comp.externalPath = os.path.abspath(comp.externalPath)
        info.filePath = comp.totalPath()
    info.nickname = comp.filenameBase
    if axisorder:
        info.axistags = vigra.defaultAxistags(axisorder)

    logger.debug("adding lane: {}".format(info))

    opDataSelection = workflow.dataSelectionApplet.topLevelOperator

    # Add a lane
    num_lanes = len(opDataSelection.DatasetGroup) + 1
    logger.debug("num_lanes: {}".format(num_lanes))
    opDataSelection.DatasetGroup.resize(num_lanes)

    # Configure it.
    role_index = 0  # raw data
    opDataSelection.DatasetGroup[-1][role_index].setValue(info)

    workflow.handleNewLanesAdded()
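

# A minimal usage sketch for _append_lane (not part of the original example).
# It assumes a headless shell has already been created, as classify_pixel_hdf
# below demonstrates, and the HDF5 path used here is a placeholder.
def _demo_append_lane(shell):
    """Hypothetical driver: append one raw-data lane to a loaded project."""
    _append_lane(shell.workflow, '/data/raw_volume.h5/volume', axisorder='zyx')
    num_lanes = len(shell.workflow.dataSelectionApplet.topLevelOperator.DatasetGroup)
    print('lanes now in project:', num_lanes)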
def classify_pixel_hdf(hdf_data_set_name, classifier, threads, ram):
    """
    Interface function to the ilastik pixel classification workflow.

    Runs a pre-trained ilastik classifier on a volume of data given in an HDF5 file.
    Adapted from Stuart Berg's example here:
    https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py

    Parameters:
        hdf_data_set_name: path to the HDF5 dataset to be classified (a 3D volume)
        classifier: path to the trained ilastik project (.ilp) file
        threads: number of threads to use for classifying the input data
        ram: amount of RAM to use, in MB

    Returns:
        hdf_dataset_path: the value returned by run_export(); since
            export_to_array=False, the probability maps are exported to disk
            according to the project's Data Export settings and this is None.
    """
    
    # Before we start ilastik, prepare these environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(threads)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)
    
    # Set the command-line arguments directly on an argparse.Namespace object.
    # Provide your project file, and don't forget to specify headless.
    args = ilastik_main.parser.parse_args([])
    args.headless = True
    args.project = classifier
    
    # Instantiate the 'shell' (an instance of ilastik.shell.HeadlessShell).
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)
    
    # Obtain the training operator
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    
    # Sanity checks
    assert len(opPixelClassification.InputImages) > 0
    assert opPixelClassification.Classifier.ready()
    
    # In case you're curious about which label class is which,
    # let's read the label names from the project file.
    label_names = opPixelClassification.LabelNames.value
    label_colors = opPixelClassification.LabelColors.value
    probability_colors = opPixelClassification.PmapColors.value
    
    print("label_names, label_colors, probability_colors", label_names, label_colors, probability_colors)
    
    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    data_info = DatasetInfo(hdf_data_set_name)
    # Interpret the volume's axes as z (slices), y (rows), x (columns).
    data_info.axistags = vigra.defaultAxistags('zyx')
    role_data_dict = OrderedDict([("Raw Data", [data_info])])
    # Run the export via the BatchProcessingApplet
    # Note: If you don't provide export_to_array, then the results will
    #       be exported to disk according to project's DataExport settings.
    #       In that case, run_export() returns None.
    
    hdf_dataset_path = shell.workflow.batchProcessingApplet.run_export(
        role_data_dict, export_to_array=False)
    
    print("DONE WITH CLASSIFICATION.")
    
    return hdf_dataset_path
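

# A minimal usage sketch for classify_pixel_hdf (not part of the original
# example). All file names are placeholders: 'pixel_classifier.ilp' stands for
# a real pre-trained pixel classification project, and the HDF5 path must
# point at an existing dataset.
def _demo_classify_pixel_hdf():
    result = classify_pixel_hdf('/data/raw_volume.h5/volume',  # HDF5 file + internal dataset path
                                'pixel_classifier.ilp',        # trained ilastik project file
                                threads=4,                     # becomes LAZYFLOW_THREADS
                                ram=4096)                      # becomes LAZYFLOW_TOTAL_RAM_MB
    # With export_to_array=False the probability maps are written to disk via
    # the project's Data Export settings, so `result` is expected to be None.
    print('run_export returned:', result)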


# A second variant of the same example; this one interprets the volume's axes
# as tyx (time, rows, columns) instead of zyx.
def classify_pixel_hdf(hdf_data_set_name, classifier, threads, ram):
    """
    Runs a pre-trained ilastik classifier on a volume of data given in an HDF5 file.
    Adapted from Stuart Berg's example here:
    https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py

    Parameters:
        hdf_data_set_name: path to the HDF5 dataset to be classified (a 3D volume)
        classifier: path to the trained ilastik project (.ilp) file
        threads: number of threads to use for classifying the input data
        ram: amount of RAM to use, in MB

    Returns:
        hdf_dataset_path: the value returned by run_export(); since
            export_to_array=False, the probability maps are exported to disk
            according to the project's Data Export settings and this is None.
    """

    # Before we start ilastik, prepare these environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(threads)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)

    # Set the command-line arguments directly on an argparse.Namespace object.
    # Provide your project file, and don't forget to specify headless.
    args = ilastik_main.parser.parse_args([])
    args.headless = True
    args.project = classifier

    # Instantiate the 'shell' (an instance of ilastik.shell.HeadlessShell).
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    # Obtain the training operator
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator

    # Sanity checks
    assert len(opPixelClassification.InputImages) > 0
    assert opPixelClassification.Classifier.ready()

    # In case you're curious about which label class is which,
    # let's read the label names from the project file.
    label_names = opPixelClassification.LabelNames.value
    label_colors = opPixelClassification.LabelColors.value
    probability_colors = opPixelClassification.PmapColors.value

    print("label_names, label_colors, probability_colors", label_names,
          label_colors, probability_colors)

    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    data_info = DatasetInfo(hdf_data_set_name)
    # Interpret the volume's axes as t (time), y (rows), x (columns).
    data_info.axistags = vigra.defaultAxistags('tyx')
    role_data_dict = OrderedDict([("Raw Data", [data_info])])
    # Run the export via the BatchProcessingApplet
    # Note: If you don't provide export_to_array, then the results will
    #       be exported to disk according to project's DataExport settings.
    #       In that case, run_export() returns None.

    hdf_dataset_path = shell.workflow.batchProcessingApplet.run_export(
        role_data_dict, export_to_array=False)

    print("DONE WITH CLASSIFICATION.")

    return hdf_dataset_path
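

# Usage of this tyx variant mirrors the sketch after the first
# classify_pixel_hdf above; only the assumed axis order (time, rows, columns)
# differs, as would suit a time series of 2D frames.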