Example #1
    def test3DProjectLocalData(self, serializer, empty_project_file, graph):
        empty_project_file.create_group("DataSelection")
        empty_project_file["DataSelection"].create_group("local_data")
        empty_project_file["DataSelection/local_data"].create_dataset(
            "dataset1", data=self.imgData3Dc)
        info = ProjectInternalDatasetInfo(
            inner_path="DataSelection/local_data/dataset1",
            project_file=empty_project_file)

        projectInternalData = info.get_provider_slot(graph=graph)[...].wait()
        assert projectInternalData.shape == self.imgData3Dc.shape, (
            projectInternalData.shape, self.imgData3Dc.shape)
        assert (projectInternalData == self.imgData3Dc).all()

        for fileName in self.generatedImages3Dc:
            filesystem_info = FilesystemDatasetInfo(filePath=fileName)
            inner_path = filesystem_info.importAsLocalDataset(
                project_file=empty_project_file)
            info = ProjectInternalDatasetInfo(project_file=empty_project_file,
                                              inner_path=inner_path)

            projectInternalData = info.get_provider_slot(
                graph=graph)[...].wait()

            assert projectInternalData.shape == self.imgData3Dc.shape, (
                projectInternalData.shape,
                self.imgData3Dc.shape,
            )
            assert (projectInternalData == self.imgData3Dc).all()
Example #2
def test_immediate_accept_does_not_change_values(qtbot, png_image,
                                                 image_zyxc_stack_path,
                                                 empty_project_file):
    info_1 = FilesystemDatasetInfo(filePath=str(png_image),
                                   normalizeDisplay=False,
                                   project_file=empty_project_file)
    info_2 = FilesystemDatasetInfo(
        filePath=str(image_zyxc_stack_path),
        project_file=empty_project_file,
        sequence_axis="z",
        normalizeDisplay=True,
        drange=(56, 78),
    )
    project_file_dir = str(Path(png_image).parent)

    widget = create_and_modify_widget(qtbot, [info_1, info_2],
                                      project_file=empty_project_file)
    edited_infos = accept_widget(qtbot, widget)

    assert info_1.axiskeys == edited_infos[0].axiskeys == "yxc"
    assert info_2.axiskeys == edited_infos[1].axiskeys == "zyxc"

    assert info_1.normalizeDisplay == edited_infos[0].normalizeDisplay == False
    assert info_2.normalizeDisplay == edited_infos[1].normalizeDisplay == True
    assert info_2.drange == edited_infos[1].drange == (56, 78)
Example #3
def internal_datasetinfo(serializer, png_image) -> ProjectInternalDatasetInfo:
    project_file = serializer.topLevelOperator.ProjectFile.value
    filesystem_info = FilesystemDatasetInfo(filePath=png_image.as_posix())
    inner_path = filesystem_info.importAsLocalDataset(
        project_file=project_file)
    info = ProjectInternalDatasetInfo(inner_path=inner_path,
                                      project_file=project_file)
    return info
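
A hedged usage sketch (not part of the scraped source): a test consuming the fixture above could read the imported data back through its provider slot, as Example #1 does; the `graph` fixture and the `laneShape` attribute used below are assumptions.

def test_internal_datasetinfo_reads_back(internal_datasetinfo, graph):
    # Pull the project-internal copy of the image through the provider slot.
    data = internal_datasetinfo.get_provider_slot(graph=graph)[...].wait()
    # laneShape mirrors the attribute shown for FilesystemDatasetInfo in Example #27 (assumed here).
    assert data.shape == internal_datasetinfo.laneShape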
Example #4
def test_cannot_edit_axis_tags_on_images_of_different_dimensionality(
        qtbot, png_image, image_zyxc_stack_path, empty_project_file):
    info_1 = FilesystemDatasetInfo(filePath=str(png_image),
                                   project_file=empty_project_file)
    info_2 = FilesystemDatasetInfo(filePath=str(image_zyxc_stack_path),
                                   sequence_axis="z",
                                   project_file=empty_project_file)

    widget = create_and_modify_widget(qtbot, [info_1, info_2],
                                      project_file=empty_project_file)
    assert not widget.axesEdit.isEnabled()

    edited_infos = accept_widget(qtbot, widget)
    assert edited_infos[0].axiskeys == info_1.axiskeys
    assert edited_infos[1].axiskeys == info_2.axiskeys
Example #5
def test_datasetinfo_editor_widget_shows_correct_data_on_multiple_info(
        qtbot, png_image, another_png_image, empty_project_file):
    info = FilesystemDatasetInfo(filePath=str(png_image),
                                 project_file=empty_project_file)
    info_2 = FilesystemDatasetInfo(filePath=str(another_png_image),
                                   project_file=empty_project_file)

    widget = create_and_modify_widget(qtbot=qtbot,
                                      infos=[info, info_2],
                                      project_file=empty_project_file)

    assert widget.axesEdit.maxLength() == 3
    assert "".join(tag.key for tag in widget.get_new_axes_tags()) == "yxc"
    assert not widget.nicknameEdit.isEnabled()
    assert widget.nicknameEdit.text() == Path(png_image).stem + ", " + Path(another_png_image).stem
Example #6
    def testBasic3DstacksFromFileList(self, empty_project_file):
        for ext, fileNames in list(self.imgFileLists2D.items()):
            fileNameString = os.path.pathsep.join(fileNames)
            reader = OperatorWrapper(OpDataSelection,
                                     graph=Graph(),
                                     operator_kwargs={"forceAxisOrder": False})
            reader.WorkingDirectory.setValue(
                str(Path(empty_project_file.filename).parent))

            reader.Dataset.setValues([
                FilesystemDatasetInfo(filePath=fileNameString,
                                      sequence_axis="z")
            ])

            # Read the test files using the data selection operator and verify the contents
            imgData3D = reader.Image[0][...].wait()

            # Check raw images
            assert imgData3D.shape == self.imgData3D.shape, (
                imgData3D.shape, self.imgData3D.shape)
            # skip this if image was saved compressed:
            if any(
                    x.strip(".") in ext.lower()
                    for x in self.compressedExtensions):
                print("Skipping raw comparison for compressed data: {}".format(
                    ext))
                continue
            numpy.testing.assert_array_equal(imgData3D, self.imgData3D)
Example #7
    def testNoChannelAxis(self):
        """
        If we add a dataset that is missing a channel axis altogether,
        the operator should automatically append a channel axis.
        """
        noChannelFilename = os.path.join(self.workingDir, "NoChannelAxis.npy")
        noChannelData = numpy.random.random((100, 100))
        numpy.save(noChannelFilename, noChannelData)

        info = FilesystemDatasetInfo(filePath=noChannelFilename, axistags=vigra.defaultAxistags("xy"))

        op = OpDataSelectionGroup(graph=Graph())
        op.WorkingDirectory.setValue(self.workingDir)
        op.DatasetRoles.setValue(["RoleA"])

        op.DatasetGroup.resize(1)
        op.DatasetGroup[0].setValue(info)

        assert op.ImageGroup[0].ready()

        # Note that we expect a channel axis to be appended to the data.
        expected_data = noChannelData[:, :, numpy.newaxis]
        data_from_op = op.ImageGroup[0][:].wait()

        assert data_from_op.dtype == expected_data.dtype
        assert data_from_op.shape == expected_data.shape
        assert (data_from_op == expected_data).all()

        # op.Image is a synonym for op.ImageGroup[0]
        assert op.Image.ready()
        assert (op.Image[:].wait() == expected_data).all()

        # Ensure that files opened by the inner operators are closed before we exit.
        op.DatasetGroup.resize(0)
Example #8
        def impl():
            # Add a file
            from ilastik.applets.dataSelection.opDataSelection import DatasetInfo, FilesystemDatasetInfo

            info = FilesystemDatasetInfo(
                filePath=self.SAMPLE_DATA,
                project_file=self.shell.projectManager.currentProjectFile)
            opDataSelection.DatasetGroup.resize(1)
            opDataSelection.DatasetGroup[0][0].setValue(info)

            # Set some features
            opFeatures = workflow.featureSelectionApplet.topLevelOperator
            #                    sigma:   0.3    0.7    1.0    1.6    3.5    5.0   10.0
            selections = numpy.array([
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
            ])

            opFeatures.SelectionMatrix.setValue(selections)

            # Save and close
            self.shell.projectManager.saveProject()
            self.shell.ensureNoCurrentProject(assertClean=True)
Example #9
    def testBasic3DcStackFromGlobString(self, empty_project_file):
        """Test if stacked 2d 3-channel files are loaded correctly"""
        # For some reason vigra saves 2D+c data compressed in gifs, so skip!
        for fileName, nickname in zip(self.imgFileNameGlobs2Dc,
                                      self.imgFileNameGlobs2DcNicknames):
            reader = OperatorWrapper(OpDataSelection,
                                     graph=Graph(),
                                     operator_kwargs={"forceAxisOrder": False})
            reader.WorkingDirectory.setValue(
                str(Path(empty_project_file.filename).parent))

            reader.Dataset.setValues(
                [FilesystemDatasetInfo(filePath=fileName, sequence_axis="z")])

            # Read the test files using the data selection operator and verify the contents
            imgData3Dc = reader.Image[0][...].wait()

            # Check the file name output
            assert reader.ImageName[0].value == nickname
            # Check raw images
            assert imgData3Dc.shape == self.imgData3Dc.shape, (
                imgData3Dc.shape, self.imgData3Dc.shape)
            # skip this if image was saved compressed:
            if any(x in fileName.lower()
                   for x in self.compressedExtensions + [".gif"]):
                print("Skipping raw comparison for compressed data: {}".format(
                    fileName))
                continue
            numpy.testing.assert_array_equal(imgData3Dc, self.imgData3Dc)
Example #10
    def testWeirdAxisInfos(self):
        """
        If we add a dataset that has the channel axis in the wrong place,
        the operator should automatically transpose it to be last.
        """
        weirdAxisFilename = os.path.join(self.workingDir, "WeirdAxes.npy")
        expected_data = numpy.random.random((3, 100, 100))
        numpy.save(weirdAxisFilename, expected_data)

        info = FilesystemDatasetInfo(filePath=weirdAxisFilename, axistags=vigra.defaultAxistags("cxy"))

        op = OpDataSelectionGroup(graph=Graph(), forceAxisOrder=False)
        op.WorkingDirectory.setValue(self.workingDir)
        op.DatasetRoles.setValue(["RoleA"])

        op.DatasetGroup.resize(1)
        op.DatasetGroup[0].setValue(info)

        assert op.ImageGroup[0].ready()

        data_from_op = op.ImageGroup[0][:].wait()

        assert data_from_op.dtype == expected_data.dtype
        assert data_from_op.shape == expected_data.shape, (data_from_op.shape, expected_data.shape)
        assert (data_from_op == expected_data).all()

        # op.Image is a synonym for op.ImageGroup[0]
        assert op.Image.ready()
        assert (op.Image[:].wait() == expected_data).all()

        # Ensure that files opened by the inner operators are closed before we exit.
        op.DatasetGroup.resize(0)
Example #11
    def testBasic2Dc(self):
        """Test if 2d 3-channel files are loaded correctly"""
        # For some reason vigra saves 2D+c data compressed in gifs, so skip!
        self.compressedExtensions.append(".gif")
        for fileName in self.imgFileNames2Dc:
            graph = lazyflow.graph.Graph()
            reader = OperatorWrapper(OpDataSelection, graph=graph, operator_kwargs={"forceAxisOrder": False})
            reader.ProjectFile.setValue(self.projectFile)
            reader.WorkingDirectory.setValue(os.getcwd())

            info = FilesystemDatasetInfo(filePath=fileName)

            reader.Dataset.setValues([info])

            # Read the test files using the data selection operator and verify the contents
            imgData2Dc = reader.Image[0][...].wait()

            # Check the file name output
            assert reader.ImageName[0].value == self.create_nickname(fileName)
            # Check raw images
            assert imgData2Dc.shape == self.imgData2Dc.shape, (imgData2Dc.shape, self.imgData2Dc.shape)
            # skip this if image was saved compressed:
            if any(x in fileName.lower() for x in self.compressedExtensions):
                print("Skipping raw comparison for compressed data: {}".format(fileName))
                continue
            numpy.testing.assert_array_equal(imgData2Dc, self.imgData2Dc)
Example #12
    def testBasic3Dc(self):
        """Test if 2d 3-channel files are loaded correctly"""
        # For some reason vigra saves 2D+c data compressed in gifs, so skip!
        for fileName, nickname in zip(self.imgFileNames3Dc,
                                      self.imgFileNames3DcNicknames):
            graph = lazyflow.graph.Graph()
            reader = OperatorWrapper(OpDataSelection,
                                     graph=graph,
                                     operator_kwargs={"forceAxisOrder": False})
            reader.ProjectFile.setValue(self.projectFile)
            reader.WorkingDirectory.setValue(os.getcwd())
            reader.ProjectDataGroup.setValue("DataSelection/local_data")

            reader.Dataset.setValues(
                [FilesystemDatasetInfo(filePath=fileName)])

            # Read the test files using the data selection operator and verify the contents
            imgData3Dc = reader.Image[0][...].wait()

            # Check the file name output
            assert reader.ImageName[0].value == nickname
            # Check raw images
            assert imgData3Dc.shape == self.imgData3Dc.shape, (
                imgData3Dc.shape, self.imgData3Dc.shape)
            numpy.testing.assert_array_equal(imgData3Dc, self.imgData3Dc)
Example #13
        def impl():
            projFilePath = self.PROJECT_FILE
            shell = self.shell

            # New project
            shell.createAndLoadNewProject(projFilePath, self.workflowClass())
            workflow = shell.projectManager.workflow

            from ilastik.applets.dataSelection.opDataSelection import DatasetInfo, FilesystemDatasetInfo

            opDataSelection = workflow.dataSelectionApplet.topLevelOperator
            for i, dataFile in enumerate(self.SAMPLE_DATA):
                # Add a file
                info = FilesystemDatasetInfo(
                    filePath=dataFile,
                    project_file=self.shell.projectManager.currentProjectFile)

                opDataSelection.DatasetGroup.resize(i + 1)
                opDataSelection.DatasetGroup[i][0].setValue(info)

            # Set some features
            opFeatures = workflow.featureSelectionApplet.topLevelOperator
            #                    sigma:   0.3    0.7    1.0    1.6    3.5    5.0   10.0
            selections = numpy.array([
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
            ])
            opFeatures.SelectionMatrix.setValue(selections)
Example #14
    def export_dataset(
        self,
        role_inputs: List[Union[str, DatasetInfo]],
        input_axes: Optional[str] = None,
        export_to_array: bool = False,
        sequence_axis: Optional[str] = None,
        progress_callback: Optional[Callable[[int], None]] = None,
    ) -> Union[str, numpy.ndarray]:
        """
        Configures a lane using the paths (or DatasetInfo objects) given in role_inputs and runs the workflow.
        """
        progress_callback = progress_callback or self.progressSignal
        original_num_lanes = self.num_lanes
        previous_axes_tags = self.get_previous_axes_tags()
        # Call customization hook
        self.dataExportApplet.prepare_for_entire_export()
        # Add a lane to the end of the workflow for batch processing
        # (Expanding OpDataSelection by one has the effect of expanding the whole workflow.)
        self.dataSelectionApplet.topLevelOperator.addLane(self.num_lanes)
        batch_lane = self.dataSelectionApplet.topLevelOperator.getLane(
            self.num_lanes - 1)
        try:
            for role_index, (role_input, role_axis_tags) in enumerate(
                    zip(role_inputs, previous_axes_tags)):
                if not role_input:
                    continue
                if isinstance(role_input, DatasetInfo):
                    role_info = role_input
                else:
                    role_info = FilesystemDatasetInfo(
                        filePath=role_input,
                        project_file=None,
                        axistags=vigra.defaultAxistags(input_axes) if input_axes else role_axis_tags,
                        sequence_axis=sequence_axis,
                        guess_tags_for_singleton_axes=True,  # FIXME: add cmd line param to negate this
                    )
                batch_lane.DatasetGroup[role_index].setValue(role_info)
            self.workflow().handleNewLanesAdded()
            # Call customization hook
            self.dataExportApplet.prepare_lane_for_export(self.num_lanes - 1)
            opDataExport = self.dataExportApplet.topLevelOperator.getLane(
                self.num_lanes - 1)
            opDataExport.progressSignal.subscribe(progress_callback)
            if export_to_array:
                logger.info("Exporting to in-memory array.")
                result = opDataExport.run_export_to_array()
            else:
                logger.info(f"Exporting to {opDataExport.ExportPath.value}")
                opDataExport.run_export()
                result = opDataExport.ExportPath.value

            # Call customization hook
            self.dataExportApplet.post_process_lane_export(self.num_lanes - 1)
            return result
        finally:
            self.dataSelectionApplet.topLevelOperator.removeLane(
                original_num_lanes, original_num_lanes)
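
A hedged call-site sketch (assumed, not taken from the original code): export_dataset above is driven with one entry per dataset role; the applet variable and the glob below are hypothetical.

# batch_applet: an object exposing the export_dataset method shown above (hypothetical name).
exported_path = batch_applet.export_dataset(
    role_inputs=["raw_page_*.tiff"],  # hypothetical glob for the raw-data role
    input_axes=None,                  # fall back to the axes recorded for the existing lanes
    export_to_array=False,            # write to disk and return opDataExport.ExportPath.value
    sequence_axis="z",                # stack the glob matches along z
)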
Example #15
    def _test_stack_along(self, name, extension, sequence_axis, expected):
        fileName = os.path.join(self.tmpdir, f"{name}{extension}")
        reader = OpDataSelection(graph=Graph(), forceAxisOrder=False)
        reader.WorkingDirectory.setValue(os.getcwd())
        reader.Dataset.setValue(FilesystemDatasetInfo(filePath=fileName, sequence_axis=sequence_axis))
        read = reader.Image[...].wait()

        assert numpy.allclose(read, expected), f"{name}: {read.shape}, {expected.shape}"
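
A hedged usage sketch (assumed caller, not from the scraped tests): the helper above would be invoked once per format and stacking axis with a pre-built expected volume; the test name and self.vol_stacked_z are hypothetical.

    def test_stack_h5_along_z(self):
        # "stack_z" and ".h5" name a file written earlier by the test's setup (assumed).
        self._test_stack_along("stack_z", ".h5", "z", self.vol_stacked_z)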
Example #16
    def testBasic(self, tmp_h5_single_dataset: Path):
        graph = Graph()
        opExport = OpDataExport(graph=graph)
        try:
            opExport.TransactionSlot.setValue(True)
            opExport.WorkingDirectory.setValue(self._tmpdir)

            rawInfo = FilesystemDatasetInfo(
                filePath=str(tmp_h5_single_dataset / "test_group/test_data"),
                nickname="test_nickname",
            )

            opExport.RawDatasetInfo.setValue(rawInfo)

            opExport.SelectionNames.setValue(["Mock Export Data"])

            data = numpy.random.random((100, 100)).astype(numpy.float32) * 100
            data = vigra.taggedView(data, vigra.defaultAxistags("xy"))

            opExport.Inputs.resize(1)
            opExport.Inputs[0].setValue(data)

            sub_roi = [(10, 20), (90, 80)]
            opExport.RegionStart.setValue(sub_roi[0])
            opExport.RegionStop.setValue(sub_roi[1])

            opExport.ExportDtype.setValue(numpy.uint8)

            opExport.OutputFormat.setValue("hdf5")
            opExport.OutputFilenameFormat.setValue(
                "{dataset_dir}/{nickname}_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}"
            )
            opExport.OutputInternalPath.setValue("volume/data")

            assert opExport.ImageToExport.ready()
            assert opExport.ExportPath.ready()

            expected_path = tmp_h5_single_dataset.parent.joinpath(
                rawInfo.nickname +
                "_export_x10-90_y20-80.h5/volume/data").as_posix()
            computed_path = opExport.ExportPath.value
            assert os.path.normpath(computed_path) == os.path.normpath(
                expected_path)
            opExport.run_export()
        finally:
            opExport.cleanUp()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(computed_path)

            # Compare with the correct subregion and convert dtype.
            expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)]
            expected_data = expected_data.astype(numpy.uint8)
            read_data = opRead.Output[:].wait()
            assert (read_data == expected_data).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example #17
    def testProjectLocalData(self, serializer, empty_project_file, graph):
        for fileName in self.generatedImages2Dc:
            # For some reason vigra saves 2D+c data compressed in gifs, so skip!
            if Path(fileName).suffix in self.compressedExtensions + [".gif"]:
                continue
            filesystem_info = FilesystemDatasetInfo(filePath=fileName)

            # From project
            inner_path = filesystem_info.importAsLocalDataset(project_file=empty_project_file)
            info = ProjectInternalDatasetInfo(project_file=empty_project_file, inner_path=inner_path)

            projectInternalData = info.get_provider_slot(graph=graph)[...].wait()

            assert projectInternalData.shape == self.imgData2Dc.shape, (
                projectInternalData.shape,
                self.imgData2Dc.shape,
            )
            assert (projectInternalData == self.imgData2Dc).all()
Example #18
    def test_real_data_source(self):
        reader = OperatorWrapper(OpDataSelection, graph=Graph(), operator_kwargs={"forceAxisOrder": False})
        reader.WorkingDirectory.setValue(os.getcwd())

        reader.Dataset.setValues([FilesystemDatasetInfo(filePath=self.testRawDataFileName)])

        # Read the test file using the data selection operator and verify the contents
        imgData = reader.Image[0][...].wait()

        assert imgData.shape == self.imgData.shape
        numpy.testing.assert_array_equal(imgData, self.imgData)
Example #19
        def impl():
            projFilePath = self.project_file
            shell = self.shell

            # New project
            shell.createAndLoadNewProject(projFilePath, self.workflowClass())
            workflow = shell.projectManager.workflow

            # Add our input files:
            opDataSelection = workflow.dataSelectionApplet.topLevelOperator
            opDataSelection.DatasetGroup.resize(1)
            info_raw = FilesystemDatasetInfo(
                filePath=self.sample_data_raw,
                project_file=self.shell.projectManager.currentProjectFile)
            opDataSelection.DatasetGroup[0][0].setValue(info_raw)
            info_prob = FilesystemDatasetInfo(filePath=self.sample_data_prob)
            info_raw.nickname = "test_data"
            opDataSelection.DatasetGroup[0][1].setValue(info_prob)

            # Save
            shell.projectManager.saveProject()
Example #20
    def test_load_single_file_with_glob(self):
        reader = OperatorWrapper(OpDataSelection, graph=Graph(), operator_kwargs={"forceAxisOrder": False})
        reader.WorkingDirectory.setValue(os.getcwd())

        reader.Dataset.setValues([FilesystemDatasetInfo(filePath=self.glob_string, sequence_axis="t")])

        # Read the test files using the data selection operator and verify the contents
        imgData = reader.Image[0][...].wait()

        # Check raw images
        assert imgData.shape == self.imgData3Dct.shape, (imgData.shape, self.imgData3Dct.shape)

        numpy.testing.assert_array_equal(imgData, self.imgData3Dct)
Example #21
    def test(self):
        """
        Make sure that the dataset roles work the way we expect them to.
        """
        infoA = FilesystemDatasetInfo(filePath=self.group1Data[0][0])
        infoC = FilesystemDatasetInfo(filePath=self.group1Data[1][0])

        op = OpDataSelectionGroup(graph=Graph())
        op.WorkingDirectory.setValue(self.workingDir)
        op.DatasetRoles.setValue(["RoleA", "RoleB", "RoleC"])

        op.DatasetGroup.resize(3)
        op.DatasetGroup[0].setValue(infoA)
        # Leave RoleB blank -- datasets other than the first are optional
        op.DatasetGroup[2].setValue(infoC)

        assert op.ImageGroup[0].ready()
        assert op.ImageGroup[2].ready()

        expectedDataA = self.group1Data[0][1]
        dataFromOpA = op.ImageGroup[0][:].wait()

        assert dataFromOpA.dtype == expectedDataA.dtype
        assert dataFromOpA.shape == expectedDataA.shape
        assert (dataFromOpA == expectedDataA).all()

        # RoleC occupies the third slot, so compare against the second group entry.
        expectedDataC = self.group1Data[1][1]
        dataFromOpC = op.ImageGroup[2][:].wait()

        assert dataFromOpC.dtype == expectedDataC.dtype
        assert dataFromOpC.shape == expectedDataC.shape
        assert (dataFromOpC == expectedDataC).all()

        assert op.Image.ready()
        assert (op.Image[:].wait() == expectedDataA).all()

        # Ensure that files opened by the inner operators are closed before we exit.
        op.DatasetGroup.resize(0)
Example #22
def test_datasetinfo_editor_widget_shows_edits_data_on_multiple_infos_with_same_dimensionality(
        qtbot, png_image, another_png_image, empty_project_file):
    info_1 = FilesystemDatasetInfo(filePath=str(png_image),
                                   project_file=empty_project_file)
    info_2 = FilesystemDatasetInfo(filePath=str(another_png_image),
                                   project_file=empty_project_file)
    project_file_dir = str(Path(png_image).parent)

    widget = create_and_modify_widget(
        qtbot,
        [info_1, info_2],
        project_file=empty_project_file,
        axiskeys="cxy",
        display_mode="binary-mask",
        normalizeDisplay=True,
        drange=(20, 40),
    )

    edited_infos = accept_widget(qtbot, widget)
    assert all(info.axiskeys == "cxy" for info in edited_infos)
    assert all(info.display_mode == "binary-mask" for info in edited_infos)
    assert all(info.normalizeDisplay == True for info in edited_infos)
    assert all(info.drange == (20, 40) for info in edited_infos)
Example #23
        def impl():
            projFilePath = self.PROJECT_FILE

            shell = self.shell

            # New project
            shell.createAndLoadNewProject(projFilePath, self.workflowClass())
            workflow = shell.projectManager.workflow

            # Add a file
            from ilastik.applets.dataSelection.opDataSelection import DatasetInfo, FilesystemDatasetInfo

            info = FilesystemDatasetInfo(
                filePath=self.SAMPLE_DATA,
                project_file=self.shell.projectManager.currentProjectFile)
            opDataSelection = workflow.dataSelectionApplet.topLevelOperator
            opDataSelection.DatasetGroup.resize(1)
            opDataSelection.DatasetGroup[0][0].setValue(info)

            # Set some features
            opFeatures = workflow.featureSelectionApplet.topLevelOperator
            #                    sigma:   0.3    0.7    1.0    1.6    3.5    5.0   10.0
            selections = numpy.array([
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
            ])

            opFeatures.SelectionMatrix.setValue(selections)

            workflow = self.shell.projectManager.workflow
            countingClassApplet = workflow.countingApplet
            gui = countingClassApplet.getMultiLaneGui()
            opCount = countingClassApplet.topLevelOperator

            opCount.opTrain.Sigma.setValue(self.COUNTING_SIGMA)

            # Select the labeling drawer
            self.shell.setSelectedAppletDrawer(COUNTING_APPLET_INDEX)

            # Turn off the huds and so we can capture the raw image
            viewMenu = gui.currentGui().menus()[0]
            viewMenu.actionToggleAllHuds.trigger()

            # Save and close
            shell.projectManager.saveProject()
            shell.ensureNoCurrentProject(assertClean=True)
Example #24
    def test_load_single_file_with_list(self):
        reader = OperatorWrapper(OpDataSelection, graph=Graph(), operator_kwargs={"forceAxisOrder": False})
        reader.WorkingDirectory.setValue(os.getcwd())

        fileNameString = os.path.pathsep.join(self.file_names)
        info = FilesystemDatasetInfo(filePath=fileNameString, sequence_axis="t")

        reader.Dataset.setValues([info])

        # Read the test files using the data selection operator and verify the contents
        imgData = reader.Image[0][...].wait()
        print("imgData", reader.Image.meta.axistags, reader.Image.meta.original_axistags)

        # Check raw images
        assert imgData.shape == self.imgData3Dct.shape, (imgData.shape, self.imgData3Dct.shape)

        numpy.testing.assert_array_equal(imgData, self.imgData3Dct)
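
An editorial note inferred from Examples #6, #24 and #26: ilastik encodes a file stack as a single filePath string whose entries are joined with os.path.pathsep, while sequence_axis selects the stacking axis; the file names below are hypothetical.

stack_path = os.path.pathsep.join(["frame_0.png", "frame_1.png"])
stacked_info = FilesystemDatasetInfo(filePath=stack_path, sequence_axis="t")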
Example #25
    def testBasic3DWrongAxes(self):
        """Test if 3D file with intentionally wrong axes is rejected """
        for fileName in self.imgFileNames3D:
            graph = lazyflow.graph.Graph()
            reader = OperatorWrapper(OpDataSelection, graph=graph, operator_kwargs={"forceAxisOrder": False})
            reader.ProjectFile.setValue(self.projectFile)
            reader.WorkingDirectory.setValue(os.getcwd())
            reader.ProjectDataGroup.setValue("DataSelection/local_data")

            info = FilesystemDatasetInfo(filePath=fileName, axistags=vigra.defaultAxistags("tzyc"))

            # Setting deliberately wrong axistags must raise a DatasetConstraintError.
            try:
                reader.Dataset.setValues([info])
            except DatasetConstraintError:
                pass
            else:
                assert False, "Should have thrown a DatasetConstraintError!"
Example #26
def test_modify_axistags_in_stack(qtbot, png_image, another_png_image,
                                  empty_project_file):
    info = FilesystemDatasetInfo(
        filePath=str(png_image) + os.path.pathsep + str(another_png_image),
        sequence_axis="t",
    )
    widget = create_and_modify_widget(qtbot,
                                      infos=[info],
                                      project_file=empty_project_file,
                                      axiskeys="zxyc")
    new_info = accept_widget(qtbot, widget)[0]
    assert new_info.axiskeys == "zxyc"

    widget2 = create_and_modify_widget(qtbot,
                                       infos=[new_info],
                                       project_file=empty_project_file,
                                       axiskeys="txyc",
                                       location=ProjectInternalDatasetInfo)
    new_info2 = accept_widget(qtbot, widget2)[0]
    assert new_info2.axiskeys == "txyc"
Example #27
def test_datasetinfo_editor_widget_shows_correct_data_on_single_info(
        qtbot, png_image, empty_project_file):
    info = FilesystemDatasetInfo(filePath=str(png_image),
                                 project_file=empty_project_file)
    assert info.axiskeys == "yxc"
    assert info.laneDtype == numpy.uint8
    assert info.laneShape == (100, 200, 1)

    editor_widget = create_and_modify_widget(qtbot, [info], empty_project_file)

    assert editor_widget.axesEdit.maxLength() == 3
    assert "".join(tag.key
                   for tag in editor_widget.get_new_axes_tags()) == "yxc"
    assert editor_widget.nicknameEdit.text() == Path(png_image).stem
    assert editor_widget.nicknameEdit.isEnabled()
    assert editor_widget.normalizeDisplayComboBox.isVisible()
    assert editor_widget.storageComboBox.isVisible()

    edited_info = accept_widget(qtbot, editor_widget)[0]
    assert edited_info.axistags == info.axistags
Example #28
def test_datasetinfo_editor_widget_modifies_single_info(
        qtbot, png_image, empty_project_file):
    info = FilesystemDatasetInfo(filePath=str(png_image),
                                 project_file=empty_project_file)
    widget = create_and_modify_widget(
        qtbot,
        [info],
        project_file=empty_project_file,
        nickname="SOME_NICKNAME",
        axiskeys="xyc",
        normalizeDisplay=True,
        drange=(10, 20),
        display_mode="alpha-modulated",
        location=RelativeFilesystemDatasetInfo,
    )
    edited_info = accept_widget(qtbot, widget)[0]
    assert edited_info.axiskeys == "xyc"
    assert edited_info.nickname == "SOME_NICKNAME"
    assert edited_info.normalizeDisplay == True
    assert edited_info.drange == (10, 20)
    assert edited_info.display_mode == "alpha-modulated"
    assert isinstance(edited_info, RelativeFilesystemDatasetInfo)
    assert edited_info.filePath == Path(png_image).absolute().as_posix()
Example #29
def image_yxc_fs_info(png_image, empty_project_file):
    return FilesystemDatasetInfo(filePath=str(png_image),
                                 project_file=empty_project_file)
Example #30
    def create_new_project(cls, project_file_path, dataset_path):
        # Instantiate 'shell'
        shell = HeadlessShell()

        # Create a blank project file and load it.
        newProjectFile = ProjectManager.createBlankProjectFile(project_file_path, PixelClassificationWorkflow, [])
        newProjectFile.close()
        shell.openProjectFile(project_file_path)
        workflow = shell.workflow

        # Add a file
        from ilastik.applets.dataSelection.opDataSelection import FilesystemDatasetInfo

        info = FilesystemDatasetInfo(filePath=dataset_path)
        opDataSelection = workflow.dataSelectionApplet.topLevelOperator
        opDataSelection.DatasetGroup.resize(1)
        opDataSelection.DatasetGroup[0][0].setValue(info)

        # Set some features
        ScalesList = [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0]
        FeatureIds = [
            "GaussianSmoothing",
            "LaplacianOfGaussian",
            "StructureTensorEigenvalues",
            "HessianOfGaussianEigenvalues",
            "GaussianGradientMagnitude",
            "DifferenceOfGaussians",
        ]

        opFeatures = workflow.featureSelectionApplet.topLevelOperator
        opFeatures.Scales.setValue(ScalesList)
        opFeatures.FeatureIds.setValue(FeatureIds)

        #                    sigma:   0.3    0.7    1.0    1.6    3.5    5.0   10.0
        selections = numpy.array(
            [
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [True, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
                [False, False, False, False, False, False, False],
            ]
        )
        opFeatures.SelectionMatrix.setValue(selections)

        # Add some labels directly to the operator
        opPixelClass = workflow.pcApplet.topLevelOperator

        opPixelClass.LabelNames.setValue(["Label 1", "Label 2"])

        slicing1 = sl[0:1, 0:10, 0:10, 0:1, 0:1]
        labels1 = 1 * numpy.ones(slicing2shape(slicing1), dtype=numpy.uint8)
        opPixelClass.LabelInputs[0][slicing1] = labels1

        slicing2 = sl[0:1, 0:10, 10:20, 0:1, 0:1]
        labels2 = 2 * numpy.ones(slicing2shape(slicing2), dtype=numpy.uint8)
        opPixelClass.LabelInputs[0][slicing2] = labels2

        # Train the classifier
        opPixelClass.FreezePredictions.setValue(False)
        _ = opPixelClass.Classifier.value

        # Save and close
        shell.projectManager.saveProject()
        del shell