def testUsingPreloadedArraysWhenScriptingBatchProcessing(self):
        args = app.parse_args([])
        args.headless = True
        args.project = self.PROJECT_FILE
        shell = app.main(args)
        assert isinstance(shell.workflow, PixelClassificationWorkflow)

        # Obtain the training operator
        opPixelClassification = shell.workflow.pcApplet.topLevelOperator

        # Sanity checks
        assert len(opPixelClassification.InputImages) > 0
        assert opPixelClassification.Classifier.ready()

        input_data1 = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(numpy.uint8)
        input_data2 = numpy.random.randint(0, 255, (2, 20, 20, 5, 1)).astype(numpy.uint8)

        role_data_dict = {
            "Raw Data": [
                PreloadedArrayDatasetInfo(preloaded_array=input_data1, axistags=vigra.AxisTags("tzyxc")),
                PreloadedArrayDatasetInfo(preloaded_array=input_data2, axistags=vigra.AxisTags("tzyxc")),
            ]
        }

        predictions = shell.workflow.batchProcessingApplet.run_export(role_data_dict, export_to_array=True)
        for result in predictions:
            assert result.shape == (2, 20, 20, 5, 2)
def analyze(conn, images, model, new_dataset, extension=".tar", resolution=0):
    # Prepare ilastik
    # Temporary directory to download files into.
    # tempfile.mkdtemp() already creates the directory,
    # so no extra existence check is needed.
    path = tempfile.mkdtemp()

    os.environ["LAZYFLOW_THREADS"] = "2"
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"
    args = app.parse_args([])
    args.headless = True
    args.project = model
    args.readonly = True
    shell = app.main(args)

    start = time.time()
    for image in images:
        input_data = load_from_s3(image, path)
        # run ilastik headless
        print('running ilastik using %s and %s' % (model, image.getName()))
        data = OrderedDict([(
            "Raw Data",
            [PreloadedArrayDatasetInfo(preloaded_array=input_data)],
        )])
        shell.workflow.batchProcessingApplet.run_export(
            data, export_to_array=True)  # noqa
    elapsed = time.time() - start
    print("Batch processing took %.2f seconds" % elapsed)
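
# The load_from_s3 helper is not shown in these snippets (it is called with
# two arguments above and with one argument in Example #3). A minimal sketch
# of the two-argument variant, assuming a hypothetical bucket name "my-bucket"
# and the boto3 client (both assumptions, not part of the original code):
import os

import boto3
import vigra


def load_from_s3(image, path):
    # Download the file for this image into the temporary directory,
    # then load it as an array for PreloadedArrayDatasetInfo.
    s3 = boto3.client("s3")
    local_file = os.path.join(path, image.getName())
    s3.download_file("my-bucket", image.getName(), local_file)
    # readImage covers common 2D formats; volumetric data would need a
    # different reader (e.g. h5py for HDF5 files).
    return vigra.impex.readImage(local_file)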
Example #3
def analyze(image_id, model):
    args = app.parse_args([])
    args.headless = True
    args.project = model
    args.readonly = True
    shell = app.main(args)
    input_data = load_from_s3(image_id)
    # run ilastik headless
    data = [{
        "Raw Data":
        PreloadedArrayDatasetInfo(preloaded_array=input_data,
                                  axistags=vigra.defaultAxistags("tzyxc"))
    }]  # noqa
    return shell.workflow.batchProcessingApplet.run_export(
        data, export_to_array=True)  # noqa
Example #4
def analyze(conn, images, model, new_dataset):
    # Prepare ilastik
    os.environ["LAZYFLOW_THREADS"] = "2"
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = "2000"
    args = app.parse_args([])
    args.headless = True
    args.project = model
    shell = app.main(args)
    for image in images:
        input_data = load_numpy_array(image)
        # run ilastik headless
        print('running ilastik using %s and %s' % (model, image.getName()))
        data = [ {"Raw Data": PreloadedArrayDatasetInfo(preloaded_array=input_data, axistags=vigra.defaultAxistags("tzyxc"))}]  # noqa
        predictions = shell.workflow.batchProcessingApplet.run_export(data,
                                                                      export_to_array=True)  # noqa
        for d in predictions:
            save_results(conn, image, d, new_dataset)
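
# save_results is likewise not shown. A hypothetical stand-in that writes
# each prediction array to a local HDF5 file, treating new_dataset as an
# output directory (an assumption; the original presumably uploads the
# results back to the server through conn):
import os

import h5py


def save_results(conn, image, data, new_dataset):
    # conn is unused in this sketch; it is kept to match the call site above.
    filename = "%s_Probabilities.h5" % image.getName()
    with h5py.File(os.path.join(new_dataset, filename), "w") as f:
        f.create_dataset("exported_data", data=data, compression="gzip")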
Example #5
    def test_fake_data_source(self):
        graph = lazyflow.graph.Graph()
        reader = OperatorWrapper(OpDataSelection, graph=graph, operator_kwargs={"forceAxisOrder": False})
        reader.ProjectFile.setValue(self.projectFile)
        reader.WorkingDirectory.setValue(os.getcwd())
        reader.ProjectDataGroup.setValue("DataSelection/local_data")

        info = PreloadedArrayDatasetInfo(preloaded_array=self.imgData, axistags=vigra.defaultAxistags("tczyx"))

        reader.Dataset.setValues([info])

        # Verify that the data selection operator now returns the preloaded
        # data with the expected shape and dtype
        imgData = reader.Image[0][...].wait()

        assert imgData.shape == self.imgData.shape
        assert imgData.dtype == self.imgData.dtype
        numpy.testing.assert_array_equal(imgData, self.imgData)
# In case you're curious about which label class is which,
# let's read the label names from the project file.
label_names = opPixelClassification.LabelNames.value
label_colors = opPixelClassification.LabelColors.value
probability_colors = opPixelClassification.PmapColors.value

print(label_names, label_colors, probability_colors)
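
# The three lists line up index-by-index (one entry per label class),
# so pairing them makes the mapping explicit:
for name, color in zip(label_names, label_colors):
    print("label %r uses color %s" % (name, color))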

# Construct an OrderedDict of role-names -> DatasetInfos
# (See PixelClassificationWorkflow.ROLE_NAMES)
role_data_dict = OrderedDict(
    [
        (
            "Raw Data",
            [
                PreloadedArrayDatasetInfo(preloaded_array=input_data1),
                PreloadedArrayDatasetInfo(preloaded_array=input_data2),
            ],
        )
    ]
)

## Note: To pull your data from disk instead of supplying it in memory, just provide filepaths like so:
# role_data_dict = OrderedDict([ ("Raw Data", [ '/path/to/input-file-1.png',
#                                               '/path/to/input-file-2.h5/mydata' ]) ])

# Run the export via the BatchProcessingApplet
# Note: If you don't pass export_to_array=True, the results will
#       be exported to disk according to your project's DataExport settings.
#       In that case, run_export() returns None.
predictions = shell.workflow.batchProcessingApplet.run_export(role_data_dict, export_to_array=True)
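
# Once the arrays are back in Python, a common follow-up is to collapse the
# per-class probability channels into a hard segmentation. A minimal sketch,
# assuming (as in the shapes above) that the channel axis comes last:
import numpy

for lane_index, probabilities in enumerate(predictions):
    # argmax over the channel axis picks the most probable label per pixel
    segmentation = numpy.argmax(probabilities, axis=-1).astype(numpy.uint8)
    numpy.save("segmentation_lane_%d.npy" % lane_index, segmentation)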